From 2998e4a41ca96932ac14cd9dc7c4af04805f0135 Mon Sep 17 00:00:00 2001
From: Markus Holzer <markus.holzer@fau.de>
Date: Wed, 1 Dec 2021 16:25:54 +0100
Subject: [PATCH] First example working

---
 pystencils/__init__.py                        |   5 +-
 pystencils/assignment.py                      |  17 +-
 pystencils/astnodes.py                        |   6 +-
 pystencils/backends/cbackend.py               |   2 +-
 pystencils/config.py                          | 149 ++++++++++++++++++
 pystencils/cpu/kernelcreation.py              |  47 +++---
 pystencils/gpucuda/kernelcreation.py          |   3 +-
 pystencils/kernel_contrains_check.py          |   4 +-
 pystencils/kernel_decorator.py                |   6 +-
 pystencils/kernelcreation.py                  | 129 +--------------
 pystencils/transformations.py                 |   5 +-
 pystencils/typing/cast_functions.py           |   4 +-
 pystencils/typing/leaf_typing.py              |  51 +++---
 pystencils/typing/transformations.py          |  26 +++
 pystencils/typing/types.py                    |   9 +-
 pystencils/typing/utilities.py                | 107 +------------
 pystencils_tests/test_abs.py                  |   7 +-
 pystencils_tests/test_astnodes.py             |   4 +-
 ...est_create_kernel_backwards_compability.py |   5 +-
 pystencils_tests/test_create_kernel_config.py |   9 +-
 pystencils_tests/test_simplifications.py      |   6 +-
 pystencils_tests/test_source_code_comment.py  |   3 +-
 pystencils_tests/test_sum_prod.py             |  10 +-
 pystencils_tests/test_types.py                |  72 +++++++--
 pystencils_tests/test_vectorization.py        |   8 +-
 .../test_vectorization_specific.py            |  16 +-
 26 files changed, 369 insertions(+), 341 deletions(-)
 create mode 100644 pystencils/config.py
 create mode 100644 pystencils/typing/transformations.py

diff --git a/pystencils/__init__.py b/pystencils/__init__.py
index 56b9c9e5c..f9d64fa00 100644
--- a/pystencils/__init__.py
+++ b/pystencils/__init__.py
@@ -7,9 +7,10 @@ from pystencils.typing.typed_sympy import TypedSymbol
 from .datahandling import create_data_handling
 from .display_utils import get_code_obj, get_code_str, show_code, to_dot
 from .field import Field, FieldType, fields
+from .config import CreateKernelConfig
 from .kernel_decorator import kernel, kernel_config
 from .kernelcreation import (
-    CreateKernelConfig, create_domain_kernel, create_indexed_kernel, create_kernel, create_staggered_kernel)
+    create_domain_kernel, create_indexed_kernel, create_kernel, create_staggered_kernel)
 from .simp import AssignmentCollection
 from .slicing import make_slice
 from .spatial_coordinates import x_, x_staggered, x_staggered_vector, x_vector, y_, y_staggered, z_, z_staggered
@@ -18,8 +19,8 @@ from .sympyextensions import SymbolCreator
 __all__ = ['Field', 'FieldType', 'fields',
            'TypedSymbol',
            'make_slice',
-           'create_kernel', 'create_domain_kernel', 'create_indexed_kernel', 'create_staggered_kernel',
            'CreateKernelConfig',
+           'create_kernel', 'create_domain_kernel', 'create_indexed_kernel', 'create_staggered_kernel',
            'Target', 'Backend',
            'show_code', 'to_dot', 'get_code_obj', 'get_code_str',
            'AssignmentCollection',
diff --git a/pystencils/assignment.py b/pystencils/assignment.py
index 4e51cd4a7..9d570727f 100644
--- a/pystencils/assignment.py
+++ b/pystencils/assignment.py
@@ -10,11 +10,11 @@ def print_assignment_latex(printer, expr):
     """sympy cannot print Assignments as Latex. Thus, this function is added to the sympy Latex printer"""
     printed_lhs = printer.doprint(expr.lhs)
     printed_rhs = printer.doprint(expr.rhs)
-    return r"{printed_lhs} \leftarrow {printed_rhs}".format(printed_lhs=printed_lhs, printed_rhs=printed_rhs)
+    return fr"{printed_lhs} \leftarrow {printed_rhs}"
 
 
 def assignment_str(assignment):
-    return r"{lhs} ← {rhs}".format(lhs=assignment.lhs, rhs=assignment.rhs)
+    return fr"{assignment.lhs} ← {assignment.rhs}"
 
 
 _old_new = sp.codegen.ast.Assignment.__new__
@@ -34,19 +34,6 @@ LatexPrinter._print_Assignment = print_assignment_latex
 sp.MutableDenseMatrix.__hash__ = lambda self: hash(tuple(self))
 
 
-# Apparently, in SymPy 1.4 Assignment.__hash__ is not implemented. This has been fixed in current master
-try:
-    sympy_version = sp.__version__.split('.')
-
-    if int(sympy_version[0]) <= 1 and int(sympy_version[1]) <= 4:
-        def hash_fun(self):
-            return hash((self.lhs, self.rhs))
-
-        Assignment.__hash__ = hash_fun
-except Exception:
-    pass
-
-
 def assignment_from_stencil(stencil_array, input_field, output_field,
                             normalization_factor=None, order='visual') -> Assignment:
     """Creates an assignment
diff --git a/pystencils/astnodes.py b/pystencils/astnodes.py
index 7b819db3c..7991f4780 100644
--- a/pystencils/astnodes.py
+++ b/pystencils/astnodes.py
@@ -6,7 +6,7 @@ from typing import Any, List, Optional, Sequence, Set, Union
 import sympy as sp
 
 import pystencils
-from pystencils.typing import create_type, get_next_parent_of_type, CastFunc
+from pystencils.typing.utilities import create_type, get_next_parent_of_type
 from pystencils.enums import Target, Backend
 from pystencils.field import Field
 from pystencils.typing.typed_sympy import FieldPointerSymbol, FieldShapeSymbol, FieldStrideSymbol, TypedSymbol
@@ -562,6 +562,7 @@ class LoopOverCoordinate(Node):
 
 class SympyAssignment(Node):
     def __init__(self, lhs_symbol, rhs_expr, is_const=True, use_auto=False):
+        # TODO add default type, float_type, int_type use sane defaults
         super(SympyAssignment, self).__init__(parent=None)
         self._lhs_symbol = sp.sympify(lhs_symbol)
         self.rhs = sp.sympify(rhs_expr)
@@ -570,8 +571,9 @@ class SympyAssignment(Node):
         self.use_auto = use_auto
 
     def __is_declaration(self):
+        from pystencils.typing import CastFunc
         if isinstance(self._lhs_symbol, CastFunc):
-            return False
+            assert False, f'{self._lhs_symbol} should not be a CastFunc'
         if any(isinstance(self._lhs_symbol, c) for c in (Field.Access, sp.Indexed, TemporaryMemoryAllocation)):
             return False
         return True
diff --git a/pystencils/backends/cbackend.py b/pystencils/backends/cbackend.py
index fa3079e32..ab35ff108 100644
--- a/pystencils/backends/cbackend.py
+++ b/pystencils/backends/cbackend.py
@@ -219,7 +219,7 @@ class CBackend:
                 return getattr(self, method_name)(node)
         raise NotImplementedError(f"{self.__class__.__name__} does not support node of type {node.__class__.__name__}")
 
-    def _print_Type(self, node):
+    def _print_AbstractType(self, node):
         return str(node)
 
     def _print_KernelFunction(self, node):
diff --git a/pystencils/config.py b/pystencils/config.py
new file mode 100644
index 000000000..97f2e2d8a
--- /dev/null
+++ b/pystencils/config.py
@@ -0,0 +1,149 @@
+import warnings
+from copy import copy
+from collections import defaultdict
+from dataclasses import dataclass, field
+from types import MappingProxyType
+from typing import Union, Tuple, List, Dict, Callable, Any
+
+from pystencils import Target, Backend, Field
+from pystencils.typing.typed_sympy import BasicType
+
+import numpy as np
+
+
+# TODO: think of more classes better usage, factory whatever ...
+@dataclass
+class CreateKernelConfig:
+    """
+    **Below, all parameters of CreateKernelConfig are explained**
+    """
+    target: Target = Target.CPU
+    """
+    All targets are defined in :class:`pystencils.enums.Target`
+    """
+    backend: Backend = None
+    """
+    All backends are defined in :class:`pystencils.enums.Backend`
+    """
+    function_name: str = 'kernel'
+    """
+    Name of the generated function - only important if generated code is written out
+    """
+    # TODO: config should check that the datatype is a Numpy type
+    # TODO: check for the python types and issue warnings
+    data_type: Union[str, Dict[str, BasicType]] = 'float64'
+    """
+    Data type used for all untyped symbols (i.e. non-fields), can also be a dict from symbol name to type
+    """
+    default_number_float: Union[str, np.dtype, BasicType] = 'float64'
+    """
+    Data type used for all untyped floating point numbers (i.e. 0.5)
+    """
+    default_number_int: Union[str, np.dtype, BasicType] = 'int64'
+    """
+    Data type used for all untyped integer numbers (i.e. 1)
+    """
+    iteration_slice: Tuple = None
+    """
+    Rectangular subset to iterate over; if not specified, the complete non-ghost-layer part of the field is iterated over
+    """
+    ghost_layers: Union[bool, int, List[Tuple[int]]] = None
+    """
+    A single integer specifies the ghost layer count at all borders, can also be a sequence of
+    pairs ``[(x_lower_gl, x_upper_gl), .... ]``. These layers are excluded from the iteration.
+    If left to default, the number of ghost layers is determined automatically from the assignments.
+    """
+    cpu_openmp: Union[bool, int] = False
+    """
+    `True` or number of threads for OpenMP parallelization, `False` for no OpenMP. If set to `True`, the maximum number
+    of available threads will be chosen.
+    """
+    cpu_vectorize_info: Dict = None
+    """
+    A dictionary with keys 'instruction_set', 'assume_aligned' and 'nontemporal';
+    for documentation of these parameters see the vectorize function. Example:
+    '{'instruction_set': 'avx512', 'assume_aligned': True, 'nontemporal': True}'
+    """
+    cpu_blocking: Tuple[int] = None
+    """
+    A tuple of block sizes or `None` if no blocking should be applied
+    """
+    omp_single_loop: bool = True
+    """
+    If OpenMP is active: whether multiple outer loops are permitted
+    """
+    gpu_indexing: str = 'block'
+    """
+    Either 'block' or 'line', or a custom indexing class, see `AbstractIndexing`
+    """
+    gpu_indexing_params: MappingProxyType = field(default=MappingProxyType({}))
+    """
+    Dict with indexing parameters (constructor parameters of indexing class)
+    e.g. for 'block' one can specify '{'block_size': (20, 20, 10) }'.
+    """
+    default_assignment_simplifications: bool = False
+    """
+    If `True`, default simplifications are first performed on the Assignments. If problems occur during the
+    simplification, a warning will be issued.
+    Furthermore, it is essential to know that this is a two-stage process. The first stage of the process acts 
+    on the level of the `AssignmentCollection`.  In this part, `create_simplification_strategy` 
+    from pystencils.simplificationfactory will be used to apply optimisations like insertion of constants to 
+    remove pressure from the registers. Thus the first part of the optimisations can only be executed if 
+    an `AssignmentCollection` is passed. The second part of the optimisation acts on the level of each Assignment 
+    individually. In this stage, all optimisations from `sympy.codegen.rewriting.optims_c99` are applied 
+    to each Assignment. Thus this stage can also be applied if a list of Assignments is passed.
+    """
+    cpu_prepend_optimizations: List[Callable] = field(default_factory=list)
+    """
+    List of extra optimizations to perform first on the AST.
+    """
+    use_auto_for_assignments: bool = False
+    """
+    If set to `True`, auto can be used in the generated code for data types. This makes the type system more robust.
+    """
+    index_fields: List[Field] = None
+    """
+    List of index fields, i.e. 1D fields with struct data type. If not `None`, `create_indexed_kernel`
+    instead of `create_domain_kernel` is used.
+    """
+    coordinate_names: Tuple[str, Any] = ('x', 'y', 'z')
+    """
+    Name of the coordinate fields in the struct data type.
+    """
+    allow_double_writes: bool = False
+    """
+    If True, don't check if every field is only written at a single location. This is required,
+    for example, for kernels that are compiled with loop step sizes > 1 and that handle multiple
+    cells at once. Use with care!
+    """
+    skip_independence_check: bool = False
+    """
+    Don't check that loop iterations are independent. This is needed e.g. for
+    periodicity kernels, which access the field outside the iteration bounds. Use with care!
+    """
+
+    def __post_init__(self):
+        # ----  Legacy parameters
+        # TODO adapt here the types
+        if isinstance(self.target, str):
+            new_target = Target[self.target.upper()]
+            warnings.warn(f'Target "{self.target}" as str is deprecated. Use {new_target} instead',
+                          category=DeprecationWarning)
+            self.target = new_target
+        # ---- Auto Backend
+        if not self.backend:
+            if self.target == Target.CPU:
+                self.backend = Backend.C
+            elif self.target == Target.GPU:
+                self.backend = Backend.CUDA
+            else:
+                raise NotImplementedError(f'Target {self.target} has no default backend')
+
+        # ---- Normalise data types
+        if not isinstance(self.data_type, dict):
+            dt = copy(self.data_type)  # The copy is necessary because BasicType has sympy shenanigans
+            self.data_type = defaultdict(lambda: BasicType(dt))
+        if not isinstance(self.default_number_float, BasicType):
+            self.default_number_float = BasicType(self.default_number_float)
+        if not isinstance(self.default_number_int, BasicType):
+            self.default_number_int = BasicType(self.default_number_int)
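# --- Editor's sketch (not part of the patch): how the new CreateKernelConfig is meant to be used,
# --- mirroring the assertions in pystencils_tests/test_create_kernel_config.py further down.
import pystencils as ps
from pystencils.config import CreateKernelConfig

config = CreateKernelConfig(target=ps.Target.GPU, data_type='float32')
assert config.backend == ps.Backend.CUDA        # backend is derived in __post_init__
print(config.data_type['any_symbol'])           # scalar data_type became a defaultdict of BasicType
print(config.default_number_float)              # untyped float literals default to float64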
diff --git a/pystencils/cpu/kernelcreation.py b/pystencils/cpu/kernelcreation.py
index 77a4a7d79..b3817c8f5 100644
--- a/pystencils/cpu/kernelcreation.py
+++ b/pystencils/cpu/kernelcreation.py
@@ -5,22 +5,24 @@ import numpy as np
 
 import pystencils.astnodes as ast
 from pystencils.assignment import Assignment
+from pystencils.config import CreateKernelConfig
 from pystencils.enums import Target, Backend
 from pystencils.astnodes import Block, KernelFunction, LoopOverCoordinate, SympyAssignment
 from pystencils.cpu.cpujit import make_python_function
-from pystencils.typing import StructType, TypedSymbol, create_type, add_types
+from pystencils.typing import StructType, TypedSymbol, create_type
+from pystencils.typing.transformations import add_types
 from pystencils.field import Field, FieldType
 from pystencils.transformations import (
     filtered_tree_iteration, get_base_buffer_index, get_optimal_loop_ordering, make_loop_over_domain,
     move_constants_before_loop, parse_base_pointer_info, resolve_buffer_accesses,
     resolve_field_accesses, split_inner_loop)
 
+from pystencils.kernel_contrains_check import KernelConstraintsCheck
+
 AssignmentOrAstNodeList = List[Union[Assignment, ast.Node]]
 
 
-def create_kernel(assignments: AssignmentOrAstNodeList, function_name: str = "kernel", type_info='double',
-                  split_groups=(), iteration_slice=None, ghost_layers=None,
-                  skip_independence_check=False, allow_double_writes=False) -> KernelFunction:
+def create_kernel(assignments: AssignmentOrAstNodeList, config: CreateKernelConfig, split_groups) -> KernelFunction:
     """Creates an abstract syntax tree for a kernel function, by taking a list of update rules.
 
     Loops are created according to the field accesses in the equations.
@@ -28,26 +30,21 @@ def create_kernel(assignments: AssignmentOrAstNodeList, function_name: str = "ke
     Args:
         assignments: list of sympy equations, containing accesses to :class:`pystencils.field.Field`.
         Defining the update rules of the kernel
-        function_name: name of the generated function - only important if generated code is written out
-        type_info: a map from symbol name to a C type specifier. If not specified all symbols are assumed to
-                   be of type 'double' except symbols which occur on the left hand side of equations where the
-                   right hand side is a sympy Boolean which are assumed to be 'bool' .
+        config: create kernel config
         split_groups: Specification on how to split up inner loop into multiple loops. For details see
                       transformation :func:`pystencils.transformation.split_inner_loop`
-        iteration_slice: if not None, iteration is done only over this slice of the field
-        ghost_layers: a sequence of pairs for each coordinate with lower and upper nr of ghost layers
-                      that should be excluded from the iteration.
-                     if None, the number of ghost layers is determined automatically and assumed to be equal for a
-                     all dimensions
-        skip_independence_check: don't check that loop iterations are independent. This is needed e.g. for
-                                 periodicity kernel, that access the field outside the iteration bounds. Use with care!
-        allow_double_writes: If True, don't check if every field is only written at a single location. This is required
-                             for example for kernels that are compiled with loop step sizes > 1, that handle multiple 
-                             cells at once. Use with care!
 
     Returns:
         AST node representing a function, that can be printed as C or CUDA code
     """
+    function_name = config.function_name
+    type_info = config.data_type
+    iteration_slice = config.iteration_slice
+    ghost_layers = config.ghost_layers
+    skip_independence_check = config.skip_independence_check
+    allow_double_writes = config.allow_double_writes
+
+    # TODO: try to delete
     def type_symbol(term):
         if isinstance(term, Field.Access) or isinstance(term, TypedSymbol):
             return term
@@ -59,10 +56,15 @@ def create_kernel(assignments: AssignmentOrAstNodeList, function_name: str = "ke
         else:
             raise ValueError("Term has to be field access or symbol")
 
-    # TODO 1) check kernel -> do general checks elsewhere
-    # TODO 2) add leaf types
-    fields_read, fields_written, assignments = add_types(
-        assignments, type_info, not skip_independence_check, check_double_write_condition=not allow_double_writes)
+    check = KernelConstraintsCheck(check_independence_condition=not skip_independence_check,
+                                   check_double_write_condition=not allow_double_writes)
+    check.visit(assignments)
+
+    fields_read = check.fields_read
+    fields_written = check.fields_written
+
+    assignments = add_types(assignments, config)
+
     all_fields = fields_read.union(fields_written)
     read_only_fields = set([f.name for f in fields_read - fields_written])
 
@@ -76,6 +78,7 @@ def create_kernel(assignments: AssignmentOrAstNodeList, function_name: str = "ke
     ast_node = KernelFunction(loop_node, Target.CPU, Backend.C, compile_function=make_python_function,
                               ghost_layers=ghost_layer_info, function_name=function_name, assignments=assignments)
 
+    # TODO move split groups here
     if split_groups:
         typed_split_groups = [[type_symbol(s) for s in split_group] for split_group in split_groups]
         split_inner_loop(ast_node, typed_split_groups)
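# --- Editor's sketch (not part of the patch): the new config-driven CPU entry point, mirroring the
# --- updated call site in pystencils/kernelcreation.py later in this patch; field names are made up.
import pystencils as ps
from pystencils.config import CreateKernelConfig
from pystencils.cpu import create_kernel as create_cpu_kernel

src, dst = ps.fields('src, dst: float64[2D]')
update = ps.Assignment(dst[0, 0], src[0, 0] + src[1, 0])
ast_node = create_cpu_kernel([update], config=CreateKernelConfig(), split_groups=())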
diff --git a/pystencils/gpucuda/kernelcreation.py b/pystencils/gpucuda/kernelcreation.py
index 96399ae1c..f6daa6697 100644
--- a/pystencils/gpucuda/kernelcreation.py
+++ b/pystencils/gpucuda/kernelcreation.py
@@ -1,7 +1,8 @@
 import numpy as np
 
 from pystencils.astnodes import Block, KernelFunction, LoopOverCoordinate, SympyAssignment
-from pystencils.typing import StructType, TypedSymbol, add_types
+from pystencils.typing import StructType, TypedSymbol
+from pystencils.typing.transformations import add_types
 from pystencils.field import Field, FieldType
 from pystencils.enums import Target, Backend
 from pystencils.gpucuda.cudajit import make_python_function
diff --git a/pystencils/kernel_contrains_check.py b/pystencils/kernel_contrains_check.py
index a2b0740c9..74f566666 100644
--- a/pystencils/kernel_contrains_check.py
+++ b/pystencils/kernel_contrains_check.py
@@ -41,9 +41,9 @@ class KernelConstraintsCheck:
         self.check_double_write_condition = check_double_write_condition
 
     def visit(self, obj):
-        if isinstance(obj, (list, tuple)):
+        if isinstance(obj, list) or isinstance(obj, tuple):
             [self.visit(e) for e in obj]
-        if isinstance(obj, (sp.Eq, ast.SympyAssignment, Assignment)):
+        elif isinstance(obj, (sp.Eq, ast.SympyAssignment, Assignment)):
             self.process_assignment(obj)
         elif isinstance(obj, ast.Conditional):
             self.scopes.push()
diff --git a/pystencils/kernel_decorator.py b/pystencils/kernel_decorator.py
index 19938e150..56453c5d4 100644
--- a/pystencils/kernel_decorator.py
+++ b/pystencils/kernel_decorator.py
@@ -7,7 +7,7 @@ import sympy as sp
 
 from pystencils.assignment import Assignment
 from pystencils.sympyextensions import SymbolCreator
-from pystencils.kernelcreation import CreateKernelConfig
+from pystencils.config import CreateKernelConfig
 
 __all__ = ['kernel', 'kernel_config']
 
@@ -89,8 +89,8 @@ def kernel_config(config: CreateKernelConfig, **kwargs) -> Callable[..., Dict]:
         decorator with config
 
     Examples:
-        >>> import pystencils as ps
-        >>> config = ps.CreateKernelConfig()
+        >>> import pystencils as ps
+        >>> config = ps.config.CreateKernelConfig()
         >>> @kernel_config(config)
         ... def my_kernel(s):
         ...     f, g = ps.fields('f, g: [2D]')
diff --git a/pystencils/kernelcreation.py b/pystencils/kernelcreation.py
index a67b01e35..5ee7817f1 100644
--- a/pystencils/kernelcreation.py
+++ b/pystencils/kernelcreation.py
@@ -1,10 +1,9 @@
 import itertools
 import warnings
-from dataclasses import dataclass, field
-from types import MappingProxyType
-from typing import Callable, Union, List, Dict, Tuple, Any
+from typing import Union, List
 
 import sympy as sp
+from pystencils.config import CreateKernelConfig
 
 from pystencils.assignment import Assignment
 from pystencils.astnodes import Block, Conditional, LoopOverCoordinate, SympyAssignment
@@ -20,120 +19,6 @@ from pystencils.transformations import (
     loop_blocking, move_constants_before_loop, remove_conditionals_in_staggered_kernel)
 
 
-@dataclass
-class CreateKernelConfig:
-    """
-    **Below all parameters for the CreateKernelConfig are explained**
-    """
-    target: Target = Target.CPU
-    """
-    All targets are defined in :class:`pystencils.enums.Target`
-    """
-    backend: Backend = None
-    """
-    All backends are defined in :class:`pystencils.enums.Backend`
-    """
-    function_name: str = 'kernel'
-    """
-    Name of the generated function - only important if generated code is written out
-    """
-    # TODO: config should check that the datatype is a Numpy type
-    # TODO: check for the python types and issue warnings
-    data_type: Union[str, dict] = 'double'
-    """
-    Data type used for all untyped symbols (i.e. non-fields), can also be a dict from symbol name to type
-    """
-    iteration_slice: Tuple = None
-    """
-    Rectangular subset to iterate over, if not specified the complete non-ghost layer part of the field is iterated over
-    """
-    ghost_layers: Union[bool, int, List[Tuple[int]]] = None
-    """
-    A single integer specifies the ghost layer count at all borders, can also be a sequence of
-    pairs ``[(x_lower_gl, x_upper_gl), .... ]``. These layers are excluded from the iteration.
-    If left to default, the number of ghost layers is determined automatically from the assignments.
-    """
-    skip_independence_check: bool = False
-    """
-    Don't check that loop iterations are independent. This is needed e.g. for 
-    periodicity kernel, that access the field outside the iteration bounds. Use with care!
-    """
-    cpu_openmp: Union[bool, int] = False
-    """
-    `True` or number of threads for OpenMP parallelization, `False` for no OpenMP. If set to `True`, the maximum number
-    of available threads will be chosen.
-    """
-    cpu_vectorize_info: Dict = None
-    """
-    A dictionary with keys, 'vector_instruction_set', 'assume_aligned' and 'nontemporal'
-    for documentation of these parameters see vectorize function. Example:
-    '{'instruction_set': 'avx512', 'assume_aligned': True, 'nontemporal':True}'
-    """
-    cpu_blocking: Tuple[int] = None
-    """
-    A tuple of block sizes or `None` if no blocking should be applied
-    """
-    omp_single_loop: bool = True
-    """
-    If OpenMP is active: whether multiple outer loops are permitted
-    """
-    gpu_indexing: str = 'block'
-    """
-    Either 'block' or 'line' , or custom indexing class, see `AbstractIndexing`
-    """
-    gpu_indexing_params: MappingProxyType = field(default=MappingProxyType({}))
-    """
-    Dict with indexing parameters (constructor parameters of indexing class)
-    e.g. for 'block' one can specify '{'block_size': (20, 20, 10) }'.
-    """
-    default_assignment_simplifications: bool = False
-    """
-    If `True` default simplifications are first performed on the Assignments. If problems occur during the
-    simplification a warning will be thrown. 
-    Furthermore, it is essential to know that this is a two-stage process. The first stage of the process acts 
-    on the level of the `AssignmentCollection`.  In this part, `create_simplification_strategy` 
-    from pystencils.simplificationfactory will be used to apply optimisations like insertion of constants to 
-    remove pressure from the registers. Thus the first part of the optimisations can only be executed if 
-    an `AssignmentCollection` is passed. The second part of the optimisation acts on the level of each Assignment 
-    individually. In this stage, all optimisations from `sympy.codegen.rewriting.optims_c99` are applied 
-    to each Assignment. Thus this stage can also be applied if a list of Assignments is passed.
-    """
-    cpu_prepend_optimizations: List[Callable] = field(default_factory=list)
-    """
-    List of extra optimizations to perform first on the AST.
-    """
-    use_auto_for_assignments: bool = False
-    """
-    If set to `True`, auto can be used in the generated code for data types. This makes the type system more robust.
-    """
-    index_fields: List[Field] = None
-    """
-    List of index fields, i.e. 1D fields with struct data type. If not `None`, `create_index_kernel`
-    instead of `create_domain_kernel` is used.
-    """
-    coordinate_names: Tuple[str, Any] = ('x', 'y', 'z')
-    """
-    Name of the coordinate fields in the struct data type.
-    """
-
-    def __post_init__(self):
-        # ----  Legacy parameters
-        # TODO adapt here the types
-        if isinstance(self.target, str):
-            new_target = Target[self.target.upper()]
-            warnings.warn(f'Target "{self.target}" as str is deprecated. Use {new_target} instead',
-                          category=DeprecationWarning)
-            self.target = new_target
-        # ---- Auto Backend
-        if not self.backend:
-            if self.target == Target.CPU:
-                self.backend = Backend.C
-            elif self.target == Target.GPU:
-                self.backend = Backend.CUDA
-            else:
-                raise NotImplementedError(f'Target {self.target} has no default backend')
-
-
 def create_kernel(assignments: Union[Assignment, List[Assignment], AssignmentCollection, List[Conditional]], *,
                   config: CreateKernelConfig = None, **kwargs):
     """
@@ -222,6 +107,7 @@ def create_domain_kernel(assignments: List[Assignment], *, config: CreateKernelC
         warnings.warn(f"It was not possible to apply the default pystencils optimisations to the "
                       f"AssignmentCollection due to the following problem :{e}")
 
+    # TODO: shift to CPU
     # ----  Normalizing parameters
     split_groups = ()
     if isinstance(assignments, AssignmentCollection):
@@ -242,10 +128,7 @@ def create_domain_kernel(assignments: List[Assignment], *, config: CreateKernelC
         if config.backend == Backend.C:
             from pystencils.cpu import add_openmp, create_kernel
             # TODO: data type keyword should be unified to data_type
-            ast = create_kernel(assignments, function_name=config.function_name, type_info=config.data_type,
-                                split_groups=split_groups,
-                                iteration_slice=config.iteration_slice, ghost_layers=config.ghost_layers,
-                                skip_independence_check=config.skip_independence_check)
+            ast = create_kernel(assignments, config=config, split_groups=split_groups)
             for optimization in config.cpu_prepend_optimizations:
                 optimization(ast)
             omp_collapse = None
@@ -306,7 +189,7 @@ def create_indexed_kernel(assignments: List[Assignment], *, config: CreateKernel
         can be compiled with through its 'compile()' member
 
     Example:
-        >>> import pystencils as ps
+        >>> import pystencils as ps
         >>> import numpy as np
         >>>
         >>> # Index field stores the indices of the cell to visit together with optional values
@@ -317,7 +200,7 @@ def create_indexed_kernel(assignments: List[Assignment], *, config: CreateKernel
         >>> # Additional values  stored in index field can be accessed in the kernel as well
         >>> s, d = ps.fields('s, d: [2D]')
         >>> assignment = ps.Assignment(d[0, 0], 2 * s[0, 1] + 2 * s[1, 0] + idx_field('val'))
-        >>> kernel_config = ps.CreateKernelConfig(index_fields=[idx_field], coordinate_names=('x', 'y'))
+        >>> kernel_config = ps.config.CreateKernelConfig(index_fields=[idx_field], coordinate_names=('x', 'y'))
         >>> kernel_ast = ps.create_indexed_kernel([assignment], config=kernel_config)
         >>> kernel = kernel_ast.compile()
         >>> d_arr = np.zeros([5, 5])
diff --git a/pystencils/transformations.py b/pystencils/transformations.py
index e47c80aa8..8fd4dfbc0 100644
--- a/pystencils/transformations.py
+++ b/pystencils/transformations.py
@@ -460,10 +460,7 @@ def resolve_field_accesses(ast_node, read_only_field_names=None,
             if field.name in field_to_base_pointer_info:
                 base_pointer_info = field_to_base_pointer_info[field.name]
             else:
-                base_pointer_info = [
-                    list(
-                        range(field.index_dimensions + field.spatial_dimensions))
-                ]
+                base_pointer_info = [list(range(field.index_dimensions + field.spatial_dimensions))]
 
             field_ptr = FieldPointerSymbol(
                 field.name,
diff --git a/pystencils/typing/cast_functions.py b/pystencils/typing/cast_functions.py
index 0c2da8d20..e93a410a8 100644
--- a/pystencils/typing/cast_functions.py
+++ b/pystencils/typing/cast_functions.py
@@ -16,6 +16,7 @@ class CastFunc(sp.Function):
             pass
         expr, dtype, *other_args = args
         if not isinstance(dtype, AbstractType):
+            raise NotImplementedError(f'{dtype} is not a subclass of AbstractType')
             dtype = create_type(dtype)
         # to work in conditions of sp.Piecewise cast_func has to be of type Boolean as well
         # however, a cast_function should only be a boolean if its argument is a boolean, otherwise this leads
@@ -25,7 +26,8 @@ class CastFunc(sp.Function):
         # rhs = cast_func(0, 'int')
         # print( sp.Ne(lhs, rhs) ) # would give true if all cast_funcs are booleans
         # -> thus a separate class boolean_cast_func is introduced
-        if isinstance(expr, Boolean) and (not isinstance(expr, TypedSymbol) or expr.dtype == BasicType(bool)):
+        # TODO check this
+        if isinstance(expr, Boolean) and (not isinstance(expr, TypedSymbol) or expr.dtype == BasicType('bool')):
             cls = BooleanCastFunc
 
         return sp.Function.__new__(cls, expr, dtype, *other_args, **kwargs)
diff --git a/pystencils/typing/leaf_typing.py b/pystencils/typing/leaf_typing.py
index b6ef0362f..6c51691ff 100644
--- a/pystencils/typing/leaf_typing.py
+++ b/pystencils/typing/leaf_typing.py
@@ -1,15 +1,17 @@
 from collections import namedtuple
 from typing import Union, Dict, Tuple, Any
+import logging
 
 import numpy as np
 
-import pystencils.integer_functions
 import sympy as sp
 
-from pystencils import astnodes as ast, TypedSymbol
-from pystencils.bit_masks import flag_cond
+from pystencils import astnodes as ast
 from pystencils.field import Field
-from pystencils.typing import AbstractType, BasicType, CastFunc, create_type, get_type_of_expression, collate_types
+from pystencils.typing.types import AbstractType, BasicType, create_type
+from pystencils.typing.utilities import get_type_of_expression, collate_types
+from pystencils.typing.cast_functions import CastFunc
+from pystencils.typing.typed_sympy import TypedSymbol
 from pystencils.utils import ContextVar
 from sympy.codegen import Assignment
 from sympy.logic.boolalg import BooleanFunction
@@ -33,18 +35,15 @@ class TypeAdder:
     """
     FieldAndIndex = namedtuple('FieldAndIndex', ['field', 'index'])
 
-    def __init__(self, default_symbol_type: BasicType, type_for_symbol: Dict[str, BasicType],
-                 default_number_float: BasicType, default_number_int: BasicType):
+    def __init__(self, type_for_symbol: Dict[str, BasicType], default_number_float: BasicType,
+                 default_number_int: BasicType):
         self.type_for_symbol = type_for_symbol
-        self.default_symbol_type = ContextVar(default_symbol_type)
         self.default_number_float = ContextVar(default_number_float)
         self.default_number_int = ContextVar(default_number_int)
 
-    def get_symbol_type(self, symbol: str) -> BasicType:
-        return self.type_for_symbol.get(symbol, self.default_symbol_type.get())
-
     # TODO: check if this adds only types to leave nodes of AST, get type info
     def visit(self, obj):
+
         if isinstance(obj, (list, tuple)):
             return [self.visit(e) for e in obj]
         if isinstance(obj, (sp.Eq, ast.SympyAssignment, Assignment)):
@@ -67,10 +66,14 @@ class TypeAdder:
 
     def process_assignment(self, assignment: Union[sp.Eq, ast.SympyAssignment, Assignment]) -> ast.SympyAssignment:
         # for checks it is crucial to process rhs before lhs to catch e.g. a = a + 1
-        new_rhs = self.process_expression(assignment.rhs)
-        # TODO check type rhs lhs
-        new_lhs = self.process_lhs(assignment.lhs)
-        return ast.SympyAssignment(new_lhs, new_rhs)
+        new_rhs, rhs_type = self.figure_out_type(assignment.rhs)
+        new_lhs, lhs_type = self.figure_out_type(assignment.lhs)
+        if lhs_type != rhs_type:
+            logging.warning(f'Lhs"{new_lhs} of type "{lhs_type}" is assigned with a different datatype '
+                            f'rhs: "{new_rhs}" of type "{rhs_type}".')
+            return ast.SympyAssignment(new_lhs, CastFunc(new_rhs, lhs_type))
+        else:
+            return ast.SympyAssignment(new_lhs, new_rhs)
 
     # Type System Specification
     # - Defined Types: TypedSymbol, Field, Field.Access, ...?
@@ -87,14 +90,16 @@ class TypeAdder:
     # - Mixture in expression with int and float
     # - Mixture in expression with uint64 and sint64
 
-    def figure_out_type(self, expr) -> Tuple[Any, BasicType]:  #TODO or abstract type?
+    def figure_out_type(self, expr) -> Tuple[Any, BasicType]:  # TODO or abstract type? vector type?
         # Trivial cases
+        from pystencils.field import Field
+
         if isinstance(expr, Field.Access):
             return expr, expr.dtype
         elif isinstance(expr, TypedSymbol):
             return expr, expr.dtype
         elif isinstance(expr, sp.Symbol):
-            t = TypedSymbol(expr.name, self.get_symbol_type(expr.name))  # TODO with or without name
+            t = TypedSymbol(expr.name, self.type_for_symbol[expr.name])  # TODO with or without name
             return t, t.dtype
         elif isinstance(expr, np.generic):
             assert False, f'Why do we have a np.generic in rhs???? {expr}'
@@ -103,11 +108,13 @@ class TypeAdder:
                 data_type = self.default_number_float.get()
             elif expr.is_Integer:
                 data_type = self.default_number_int.get()
+            else:
+                assert False, f'{expr} is neither Float nor Integer'
             return expr, data_type
         # TODO add everything in between
         elif isinstance(expr, sp.Mul):
             # TODO can we ignore this and move it to general expr handling, i.e. removing Mul?
-            types = [self.figure_out_type(arg) for arg in expr.args if arg not in (-1, 1)]
+            args_types = [self.figure_out_type(arg) for arg in expr.args if arg not in (-1, 1)]
             return None  # TODO collate types
         elif isinstance(expr, sp.Indexed):
             self.apply_type(expr, BasicType('uintp'))  # TODO double check
@@ -116,14 +123,18 @@ class TypeAdder:
             # TODO sp.Pow should know a type
             return None  # TODO
         else:
-            types = [self.figure_out_type(arg) for arg in expr.args]
-            # TODO collate
-            return None  # TODO
+            args_types = [self.figure_out_type(arg) for arg in expr.args]
+            collated_type = collate_types([t for _, t in args_types])
+            new_args = [CastFunc(a, collated_type) if t != collated_type else a for a, t in args_types]
+            return expr.func(*new_args) if new_args else expr, collated_type
 
     def apply_type(self, expr, data_type: AbstractType):
         pass
 
     def process_expression(self, rhs, type_constants=True):  # TODO default_type as parameter
+        import pystencils.integer_functions
+        from pystencils.bit_masks import flag_cond
+
         if isinstance(rhs, Field.Access):
             return rhs
         elif isinstance(rhs, TypedSymbol):
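# --- Editor's sketch (not part of the patch): what the lhs/rhs reconciliation in process_assignment
# --- above produces when the two sides disagree; built by hand from the typing classes of this patch.
from pystencils.typing import TypedSymbol, BasicType, CastFunc

lhs = TypedSymbol('x', BasicType('float32'))
rhs = TypedSymbol('y', BasicType('float64'))
# with differing types a warning is logged and the rhs is wrapped, so the resulting
# SympyAssignment carries roughly:  x = CastFunc(y, float32)
expected_rhs = CastFunc(rhs, BasicType('float32'))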
diff --git a/pystencils/typing/transformations.py b/pystencils/typing/transformations.py
new file mode 100644
index 000000000..f5ddcfa42
--- /dev/null
+++ b/pystencils/typing/transformations.py
@@ -0,0 +1,26 @@
+from typing import List
+
+from pystencils.config import CreateKernelConfig
+from pystencils.typing.leaf_typing import TypeAdder
+from pystencils.typing import BasicType
+from sympy.codegen import Assignment
+
+
+def add_types(eqs: List[Assignment], config: CreateKernelConfig):
+    """Traverses AST and replaces every :class:`sympy.Symbol` by a :class:`pystencils.typedsymbol.TypedSymbol`.
+
+    Additionally returns sets of all fields which are read/written
+
+    Args:
+        eqs: list of equations
+        config: CreateKernelConfig
+
+    Returns:
+        ``typed_equations`` list of equations where symbols have been replaced by typed symbols
+    """
+
+    check = TypeAdder(type_for_symbol=config.data_type,
+                      default_number_float=config.default_number_float,
+                      default_number_int=config.default_number_int)
+
+    return check.visit(eqs)
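# --- Editor's sketch (not part of the patch): a possible use of the new add_types entry point;
# --- the field and symbol names are illustrative, the call mirrors pystencils/cpu/kernelcreation.py.
import sympy as sp
import pystencils as ps
from pystencils.config import CreateKernelConfig
from pystencils.typing.transformations import add_types

f = ps.fields('f: float64[2D]')
typed_eqs = add_types([ps.Assignment(f[0, 0], f[0, 1] + sp.Symbol('a'))], CreateKernelConfig())
# every untyped sympy.Symbol in the result should now be a TypedSymbol carrying a BasicType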
diff --git a/pystencils/typing/types.py b/pystencils/typing/types.py
index 318b0932a..9ec46d5c1 100644
--- a/pystencils/typing/types.py
+++ b/pystencils/typing/types.py
@@ -9,7 +9,7 @@ import sympy.codegen.ast
 def is_supported_type(dtype: np.dtype):
     scalar = dtype.type
     c = np.issctype(dtype)
-    subclass = issubclass(scalar, np.floating) or issubclass(scalar, np.integer) or issubclass(scalar, np.bool)
+    subclass = issubclass(scalar, np.floating) or issubclass(scalar, np.integer) or issubclass(scalar, np.bool_)
     additional_checks = dtype.fields is None and dtype.hasobject is False and dtype.subdtype is None
     return c and subclass and additional_checks
 
@@ -68,11 +68,12 @@ class BasicType(AbstractType):
     # TODO: should be a sensible interface to np.dtype
 
     def __init__(self, dtype: Union[np.dtype, 'BasicType', str], const: bool = False):
-        self.const = const
         if isinstance(dtype, BasicType):
-            self.numpy_dtype = dtype.numpy_dtype  # TODO copy const as well??
+            self.numpy_dtype = dtype.numpy_dtype
+            self.const = dtype.const
         else:
             self.numpy_dtype = np.dtype(dtype)
+            self.const = const
         assert is_supported_type(self.numpy_dtype), f'Type {self.numpy_dtype} is currently not supported!'
 
     def __getnewargs__(self):
@@ -106,7 +107,7 @@ class BasicType(AbstractType):
         return issubclass(self.numpy_dtype.type, np.signedinteger)
 
     def is_bool(self):
-        return issubclass(self.numpy_dtype.type, np.bool)
+        return issubclass(self.numpy_dtype.type, np.bool_)
 
     @property
     def c_name(self) -> str:
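# --- Editor's sketch (not part of the patch): the revised const handling of the BasicType
# --- constructor above, shown on a throwaway instance.
from pystencils.typing import BasicType

t = BasicType('float64', const=True)
assert BasicType(t).const               # const is now copied from the wrapped BasicType
assert not BasicType('float64').const   # plain construction keeps the explicit default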
diff --git a/pystencils/typing/utilities.py b/pystencils/typing/utilities.py
index 2f3d175da..7779017ce 100644
--- a/pystencils/typing/utilities.py
+++ b/pystencils/typing/utilities.py
@@ -1,11 +1,10 @@
 from collections import defaultdict
 from functools import partial
-from typing import Tuple, List, Dict
+from typing import Tuple, Union, Sequence
 
 import numpy as np
 import sympy as sp
 # from pystencils.typing.leaf_typing import TypeAdder  # TODO this should be leaf_typing
-from sympy.codegen import Assignment
 from sympy.logic.boolalg import Boolean, BooleanFunction
 
 import pystencils
@@ -92,13 +91,11 @@ def result_type(*args: np.dtype):
     return s[-1]
 
 
-def collate_types(types):
+def collate_types(types: Sequence[Union[BasicType, VectorType]]):
     """
     Takes a sequence of types and returns their "common type" e.g. (float, double, float) -> double
     Uses the collation rules from numpy.
     """
-    # TODO: use np.can_cast and np.promote_types and np.result_type and np.find_common_type
-
     # # Pointer arithmetic case i.e. pointer + integer is allowed
     # if any(type(t) is PointerType for t in types):
     #     pointer_type = None
@@ -115,7 +112,7 @@ def collate_types(types):
     #     return pointer_type
     #
     # # peel of vector types, if at least one vector type occurred the result will also be the vector type
-    # vector_type = [t for t in types if type(t) is VectorType]
+    vector_type = [t for t in types if isinstance(t, VectorType)]
     # if not all_equal(t.width for t in vector_type):
     #     raise ValueError("Collation failed because of vector types with different width")
     # types = [peel_off_type(t, VectorType) for t in types]
@@ -123,12 +120,10 @@ def collate_types(types):
     # now we should have a list of basic types - struct types are not yet supported
     assert all(type(t) is BasicType for t in types)
 
-    if any(t.is_float() for t in types):
-        types = tuple(t for t in types if t.is_float())
-    # use numpy collation -> create type from numpy type -> and, put vector type around if necessary
-    result_numpy_type = np.result_type(*(t.numpy_dtype for t in types))
+    result_numpy_type = result_type(*(t.numpy_dtype for t in types))
     result = BasicType(result_numpy_type)
-    # if vector_type:
+    if vector_type:
+        raise NotImplementedError("Vector type not implemented at the moment")
     #     result = VectorType(result, vector_type[0].width)
     return result
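# --- Editor's sketch (not part of the patch): the collation rule exercised by test_collation
# --- further down, shown inline for reference.
from pystencils.typing import BasicType, collate_types

assert collate_types([BasicType('float64'), BasicType('float32')]) == BasicType('float64')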
 
@@ -264,40 +259,6 @@ if int(sympy_version[0]) * 100 + int(sympy_version[1]) >= 109:
     sp.Basic.__reduce_ex__ = basic_reduce_ex
 
 
-def add_types(eqs: List[Assignment], type_for_symbol: Dict[sp.Symbol, np.dtype], check_independence_condition: bool,
-              check_double_write_condition: bool=True):
-    """Traverses AST and replaces every :class:`sympy.Symbol` by a :class:`pystencils.typedsymbol.TypedSymbol`.
-
-    Additionally returns sets of all fields which are read/written
-
-    Args:
-        eqs: list of equations
-        type_for_symbol: dict mapping symbol names to types. Types are strings of C types like 'int' or 'double'
-        check_independence_condition: check that loop iterations are independent - this has to be skipped for indexed
-                                      kernels
-
-    Returns:
-        ``fields_read, fields_written, typed_equations`` set of read fields, set of written fields,
-         list of equations where symbols have been replaced by typed symbols
-    """
-    if isinstance(type_for_symbol, (str, type)) or not hasattr(type_for_symbol, '__getitem__'):
-        type_for_symbol = typing_from_sympy_inspection(eqs, type_for_symbol)
-
-    type_for_symbol = adjust_c_single_precision_type(type_for_symbol)
-
-    # TODO what does this do????
-    # TODO: ask Martin
-    # TODO: use correct one/rename
-    check = KernelConstraintsCheck(type_for_symbol, check_independence_condition,
-                                   check_double_write_condition=check_double_write_condition)
-
-
-
-    typed_equations = visit(eqs)
-
-    return check.fields_read, check.fields_written, typed_equations
-
-
 def insert_casts(node):
     """Checks the types and inserts casts and pointer arithmetic where necessary.
 
@@ -393,19 +354,6 @@ def insert_casts(node):
     return node.func(*args)
 
 
-def adjust_c_single_precision_type(type_for_symbol):
-    """Replaces every occurrence of 'float' with 'single' to enforce the numpy single precision type."""
-    def single_factory():
-        return "single"
-
-    for symbol in type_for_symbol:
-        if type_for_symbol[symbol] == "float":
-            type_for_symbol[symbol] = single_factory()
-    if hasattr(type_for_symbol, "default_factory") and type_for_symbol.default_factory() == "float":
-        type_for_symbol.default_factory = single_factory
-    return type_for_symbol
-
-
 def get_next_parent_of_type(node, parent_type):
     """Returns the next parent node of given type or None, if root is reached.
 
@@ -427,46 +375,3 @@ def parents_of_type(node, parent_type, include_current=False):
         if isinstance(parent, parent_type):
             yield parent
         parent = parent.parent
-
-
-def typing_from_sympy_inspection(eqs, default_type="double", default_int_type='int64'):
-    """
-    Creates a default symbol name to type mapping.
-    If a sympy Boolean is assigned to a symbol it is assumed to be 'bool' otherwise the default type, usually ('double')
-
-    Args:
-        eqs: list of equations
-        default_type: the type for non-boolean symbols
-    Returns:
-        dictionary, mapping symbol name to type
-    """
-    from pystencils.astnodes import SympyAssignment, Conditional, Node
-    result = defaultdict(lambda: default_type)
-    if hasattr(default_type, 'numpy_dtype'):
-        result['_complex_type'] = (np.zeros((1,), default_type.numpy_dtype) * 1j).dtype
-    else:
-        result['_complex_type'] = (np.zeros((1,), default_type) * 1j).dtype
-    for eq in eqs:
-        if isinstance(eq, Conditional):
-            result.update(typing_from_sympy_inspection(eq.true_block.args))
-            if eq.false_block:
-                result.update(typing_from_sympy_inspection(
-                    eq.false_block.args))
-        elif isinstance(eq, Node) and not isinstance(eq, SympyAssignment):
-            continue
-        else:
-            from pystencils.cpu.vectorization import vec_all, vec_any
-            if isinstance(eq.rhs, (vec_all, vec_any)):
-                result[eq.lhs.name] = "bool"
-            # problematic case here is when rhs is a symbol: then it is impossible to decide here without
-            # further information what type the left hand side is - default fallback is the dict value then
-            if isinstance(eq.rhs, Boolean) and not isinstance(eq.rhs, sp.Symbol):
-                result[eq.lhs.name] = "bool"
-            try:
-                result[eq.lhs.name] = get_type_of_expression(eq.rhs,
-                                                             default_float_type=default_type,
-                                                             default_int_type=default_int_type,
-                                                             symbol_type_dict=result)
-            except Exception:
-                pass  # gracefully fail in case get_type_of_expression cannot determine type
-    return result
\ No newline at end of file
diff --git a/pystencils_tests/test_abs.py b/pystencils_tests/test_abs.py
index 7bf7a1a45..8f215adab 100644
--- a/pystencils_tests/test_abs.py
+++ b/pystencils_tests/test_abs.py
@@ -1,3 +1,4 @@
+import pystencils.config
 import sympy
 
 import pystencils as ps
@@ -9,11 +10,9 @@ def test_abs():
 
     default_int_type = create_type('int64')
 
-    assignments = ps.AssignmentCollection({
-        x[0, 0]: sympy.Abs(CastFunc(y[0, 0], default_int_type))
-    })
+    assignments = ps.AssignmentCollection({x[0, 0]: sympy.Abs(CastFunc(y[0, 0], default_int_type))})
 
-    config = ps.CreateKernelConfig(target=ps.Target.GPU)
+    config = pystencils.config.CreateKernelConfig(target=ps.Target.GPU)
     ast = ps.create_kernel(assignments, config=config)
     code = ps.get_code_str(ast)
     print(code)
diff --git a/pystencils_tests/test_astnodes.py b/pystencils_tests/test_astnodes.py
index 688f63ed9..18977cee5 100644
--- a/pystencils_tests/test_astnodes.py
+++ b/pystencils_tests/test_astnodes.py
@@ -1,5 +1,7 @@
 import pytest
 import sys
+
+import pystencils.config
 import sympy as sp
 
 import pystencils as ps
@@ -90,7 +92,7 @@ def test_loop_over_coordinate():
 def test_sympy_assignment(default_assignment_simplifications):
     assignment = SympyAssignment(dst[0, 0](0), sp.log(x + 3) / sp.log(2) + sp.log(x ** 2 + 1))
 
-    config = ps.CreateKernelConfig(default_assignment_simplifications=default_assignment_simplifications)
+    config = pystencils.config.CreateKernelConfig(default_assignment_simplifications=default_assignment_simplifications)
     ast = ps.create_kernel([assignment], config=config)
     code = ps.get_code_str(ast)
         
diff --git a/pystencils_tests/test_create_kernel_backwards_compability.py b/pystencils_tests/test_create_kernel_backwards_compability.py
index bb1c97715..53137e910 100644
--- a/pystencils_tests/test_create_kernel_backwards_compability.py
+++ b/pystencils_tests/test_create_kernel_backwards_compability.py
@@ -5,6 +5,9 @@ import numpy as np
 
 
 # This test aims to trigger deprication warnings. Thus the warnings should not be displayed in the warning summary.
+import pystencils.config
+
+
 def test_create_kernel_backwards_compatibility():
     size = (30, 20)
 
@@ -24,7 +27,7 @@ def test_create_kernel_backwards_compatibility():
         ast_string = ps.create_kernel(jacobi, target='cpu').compile()
     # noinspection PyTypeChecker
     with pytest.warns(DeprecationWarning):
-        ast_config = ps.create_kernel(jacobi, config=ps.CreateKernelConfig(target='cpu')).compile()
+        ast_config = ps.create_kernel(jacobi, config=pystencils.config.CreateKernelConfig(target='cpu')).compile()
     ast_enum(f=src_field_enum, d=dst_field_enum)
     ast_string(f=src_field_string, d=dst_field_string)
     ast_config(f=src_field_config, d=dst_field_config)
diff --git a/pystencils_tests/test_create_kernel_config.py b/pystencils_tests/test_create_kernel_config.py
index 86a1c0ca8..e8ad310c7 100644
--- a/pystencils_tests/test_create_kernel_config.py
+++ b/pystencils_tests/test_create_kernel_config.py
@@ -1,22 +1,23 @@
 import numpy as np
 import pystencils as ps
+import pystencils.config
 
 
 def test_create_kernel_config():
-    c = ps.CreateKernelConfig()
+    c = pystencils.config.CreateKernelConfig()
     assert c.backend == ps.Backend.C
     assert c.target == ps.Target.CPU
 
-    c = ps.CreateKernelConfig(target=ps.Target.GPU)
+    c = pystencils.config.CreateKernelConfig(target=ps.Target.GPU)
     assert c.backend == ps.Backend.CUDA
 
-    c = ps.CreateKernelConfig(backend=ps.Backend.CUDA)
+    c = pystencils.config.CreateKernelConfig(backend=ps.Backend.CUDA)
     assert c.target == ps.Target.CPU
     assert c.backend == ps.Backend.CUDA
 
 
 def test_kernel_decorator_config():
-    config = ps.CreateKernelConfig()
+    config = pystencils.config.CreateKernelConfig()
     a, b, c = ps.fields(a=np.ones(100), b=np.ones(100), c=np.ones(100))
 
     @ps.kernel_config(config)
diff --git a/pystencils_tests/test_simplifications.py b/pystencils_tests/test_simplifications.py
index 1c9ed3c0c..fbce59aca 100644
--- a/pystencils_tests/test_simplifications.py
+++ b/pystencils_tests/test_simplifications.py
@@ -1,5 +1,7 @@
 from sys import version_info as vs
 import pytest
+
+import pystencils.config
 import sympy as sp
 import pystencils as ps
 
@@ -156,7 +158,7 @@ def test_sympy_optimizations(target, simplification):
         src[0, 0]: 1.0 * (sp.exp(dst[0, 0]) - 1)
     })
 
-    config = ps.CreateKernelConfig(target=target, default_assignment_simplifications=simplification)
+    config = pystencils.config.CreateKernelConfig(target=target, default_assignment_simplifications=simplification)
     ast = ps.create_kernel(assignments, config=config)
 
     code = ps.get_code_str(ast)
@@ -179,7 +181,7 @@ def test_evaluate_constant_terms(target, simplification):
         src[0, 0]: -sp.cos(1) + dst[0, 0]
     })
 
-    config = ps.CreateKernelConfig(target=target, default_assignment_simplifications=simplification)
+    config = pystencils.config.CreateKernelConfig(target=target, default_assignment_simplifications=simplification)
     ast = ps.create_kernel(assignments, config=config)
     code = ps.get_code_str(ast)
     if simplification:
diff --git a/pystencils_tests/test_source_code_comment.py b/pystencils_tests/test_source_code_comment.py
index 79c25ae79..b1006a941 100644
--- a/pystencils_tests/test_source_code_comment.py
+++ b/pystencils_tests/test_source_code_comment.py
@@ -9,6 +9,7 @@
 """
 import pystencils
 import pystencils.astnodes
+import pystencils.config
 
 
 def test_source_code_comment():
@@ -19,7 +20,7 @@ def test_source_code_comment():
         {a.center(): b[0, 2] + b[0, 0]}, {}
     )
 
-    config = pystencils.CreateKernelConfig(target=pystencils.Target.CPU)
+    config = pystencils.config.CreateKernelConfig(target=pystencils.Target.CPU)
     ast = pystencils.create_kernel(assignments, config=config)
 
     ast.body.append(pystencils.astnodes.SourceCodeComment("Hallo"))
diff --git a/pystencils_tests/test_sum_prod.py b/pystencils_tests/test_sum_prod.py
index 235644db2..a2403eef7 100644
--- a/pystencils_tests/test_sum_prod.py
+++ b/pystencils_tests/test_sum_prod.py
@@ -9,6 +9,8 @@
 """
 import pytest
 import numpy as np
+
+import pystencils.config
 import sympy as sp
 import sympy.abc
 
@@ -29,7 +31,7 @@ def test_sum(default_assignment_simplifications):
 
     assignments = ps.AssignmentCollection({x.center(): sum})
 
-    config = ps.CreateKernelConfig(default_assignment_simplifications=default_assignment_simplifications)
+    config = pystencils.config.CreateKernelConfig(default_assignment_simplifications=default_assignment_simplifications)
     ast = ps.create_kernel(assignments, config=config)
     code = ps.get_code_str(ast)
     kernel = ast.compile()
@@ -58,8 +60,8 @@ def test_sum_use_float(default_assignment_simplifications):
 
     assignments = ps.AssignmentCollection({x.center(): sum})
 
-    config = ps.CreateKernelConfig(default_assignment_simplifications=default_assignment_simplifications,
-                                   data_type=create_type('float32'))
+    config = pystencils.config.CreateKernelConfig(default_assignment_simplifications=default_assignment_simplifications,
+                                                  data_type=create_type('float32'))
     ast = ps.create_kernel(assignments, config=config)
     code = ps.get_code_str(ast)
     kernel = ast.compile()
@@ -90,7 +92,7 @@ def test_product(default_assignment_simplifications):
 
     assignments = ps.AssignmentCollection({x.center(): sum})
 
-    config = ps.CreateKernelConfig(default_assignment_simplifications=default_assignment_simplifications)
+    config = pystencils.config.CreateKernelConfig(default_assignment_simplifications=default_assignment_simplifications)
 
     ast = ps.create_kernel(assignments, config=config)
     code = ps.get_code_str(ast)
diff --git a/pystencils_tests/test_types.py b/pystencils_tests/test_types.py
index 774306d8d..795e5c26c 100644
--- a/pystencils_tests/test_types.py
+++ b/pystencils_tests/test_types.py
@@ -1,9 +1,12 @@
+import pytest
+
+import pystencils.config
 import sympy as sp
 import numpy as np
 
 import pystencils as ps
 from pystencils.typing import TypedSymbol, get_type_of_expression, VectorType, collate_types, create_type, \
-    typed_symbols, CastFunc, PointerArithmeticFunc, PointerType, result_type
+    typed_symbols, CastFunc, PointerArithmeticFunc, PointerType, result_type, BasicType
 
 
 def test_result_type():
@@ -39,9 +42,55 @@ def test_result_type():
     assert result_type(b, d) == d
 
 
+@pytest.mark.parametrize('dtype', ('float64', 'float32', 'int64', 'int32', 'uint32', 'uint64'))
+def test_simple_add(dtype):
+    constant = 1.0
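+    # use an integer constant for (unsigned) integer dtypes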
+    if dtype[0] in 'ui':
+        constant = 1
+    f = ps.fields(f"f: {dtype}[1D]")
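+    # typed scalar parameter with the same dtype as the field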
+    d = TypedSymbol("d", dtype)
+
+    test_arr = np.array([constant], dtype=dtype)
+
+    ur = ps.Assignment(f[0], f[0] + d)
+
+    ast = ps.create_kernel(ur)
+    code = ps.get_code_str(ast)
+    kernel = ast.compile()
+    kernel(f=test_arr, d=constant)
+
+    assert test_arr[0] == constant + constant
+
+
+@pytest.mark.parametrize('dtype1', ('float64', 'float32', 'int64', 'int32', 'uint32', 'uint64'))
+@pytest.mark.parametrize('dtype2', ('float64', 'float32', 'int64', 'int32', 'uint32', 'uint64'))
+def test_mixed_add(dtype1, dtype2):
+
+    constant = 1
+    f = ps.fields(f"f: {dtype1}[1D]")
+    g = ps.fields(f"g: {dtype2}[1D]")
+
+    test_f = np.array([constant], dtype=dtype1)
+    test_g = np.array([constant], dtype=dtype2)
+
+    ur = ps.Assignment(f[0], f[0] + g[0])
+
+    # TODO Markus: check the logging if collate_types(dtype1, dtype2) != dtype1
+    ast = ps.create_kernel(ur)
+    code = ps.get_code_str(ast)
+    kernel = ast.compile()
+    kernel(f=test_f, g=test_g)
+
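+    # the result is stored in f, i.e. in dtype1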
+    assert test_f[0] == constant + constant
+
+
+# TODO redo the following tests
 def test_collation():
-    double_type = create_type("double")
-    float_type = create_type("float32")
+    double_type = BasicType('float64')
+    float_type = BasicType('float32')
     double4_type = VectorType(double_type, 4)
     float4_type = VectorType(float_type, 4)
     assert collate_types([double_type, float_type]) == double_type
@@ -50,8 +96,8 @@ def test_collation():
 
 
 def test_vector_type():
-    double_type = create_type("double")
-    float_type = create_type("float32")
+    double_type = BasicType("double")
+    float_type = BasicType('float32')
     double4_type = VectorType(double_type, 4)
     float4_type = VectorType(float_type, 4)
 
@@ -62,8 +108,8 @@ def test_vector_type():
 
 
 def test_pointer_type():
-    double_type = create_type("double")
-    float_type = create_type("float32")
+    double_type = BasicType('float64')
+    float_type = BasicType('float32')
     double4_type = PointerType(double_type, restrict=True)
     float4_type = PointerType(float_type, restrict=False)
 
@@ -95,9 +141,9 @@ def test_assumptions():
     assert x.shape[0].is_nonnegative
     assert (2 * x.shape[0]).is_nonnegative
     assert (2 * x.shape[0]).is_integer
-    assert (TypedSymbol('a', create_type('uint64'))).is_nonnegative
-    assert (TypedSymbol('a', create_type('uint64'))).is_positive is None
-    assert (TypedSymbol('a', create_type('uint64')) + 1).is_positive
+    assert (TypedSymbol('a', BasicType('uint64'))).is_nonnegative
+    assert (TypedSymbol('a', BasicType('uint64'))).is_positive is None
+    assert (TypedSymbol('a', BasicType('uint64')) + 1).is_positive
     assert (x.shape[0] + 1).is_real
 
 
@@ -119,7 +165,7 @@ def test_sqrt_of_integer():
     assignments = [ps.Assignment(tmp, sp.sqrt(3)),
                    ps.Assignment(f[0], tmp)]
     arr_single = np.array([1], dtype=np.float32)
-    config = ps.CreateKernelConfig(data_type="float32")
+    config = pystencils.config.CreateKernelConfig(data_type="float32")
     kernel = ps.create_kernel(assignments, config=config).compile()
     kernel(f=arr_single)
 
@@ -196,7 +242,7 @@ def test_division():
     up = [ps.Assignment(tau, 1.0 / (0.5 + (3.0 * m))),
           ps.Assignment(f.center, tau)]
 
-    ast = ps.create_kernel(up, config=ps.CreateKernelConfig(data_type="float32"))
+    ast = ps.create_kernel(up, config=pystencils.config.CreateKernelConfig(data_type="float32"))
     code = ps.get_code_str(ast)
 
     assert "1.0f" in code
@@ -209,7 +255,7 @@ def test_pow():
     up = [ps.Assignment(tau, m ** 1.5),
           ps.Assignment(f.center, tau)]
 
-    ast = ps.create_kernel(up, config=ps.CreateKernelConfig(data_type="float32"))
+    ast = ps.create_kernel(up, config=pystencils.config.CreateKernelConfig(data_type="float32"))
     code = ps.get_code_str(ast)
 
     assert "1.5f" in code
diff --git a/pystencils_tests/test_vectorization.py b/pystencils_tests/test_vectorization.py
index ae4524fda..478022d32 100644
--- a/pystencils_tests/test_vectorization.py
+++ b/pystencils_tests/test_vectorization.py
@@ -1,4 +1,6 @@
 import numpy as np
+
+import pystencils.config
 import sympy as sp
 
 import pystencils as ps
@@ -48,7 +50,7 @@ def test_aligned_and_nt_stores(instruction_set=instruction_set, openmp=False):
     opt = {'instruction_set': instruction_set, 'assume_aligned': True, 'nontemporal': True,
            'assume_inner_stride_one': True}
     update_rule = [ps.Assignment(f.center(), 0.25 * (g[-1, 0] + g[1, 0] + g[0, -1] + g[0, 1]))]
-    config = ps.CreateKernelConfig(target=dh.default_target, cpu_vectorize_info=opt, cpu_openmp=openmp)
+    config = pystencils.config.CreateKernelConfig(target=dh.default_target, cpu_vectorize_info=opt, cpu_openmp=openmp)
     ast = ps.create_kernel(update_rule, config=config)
     if instruction_set in ['sse'] or instruction_set.startswith('avx'):
         assert 'stream' in ast.instruction_set
@@ -85,7 +87,7 @@ def test_inplace_update(instruction_set=instruction_set):
         f1 @= 2 * s.tmp0
         f2 @= 2 * s.tmp0
 
-    config = ps.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
+    config = pystencils.config.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
     ast = ps.create_kernel(update_rule, config=config)
     kernel = ast.compile()
     kernel(f=arr)
@@ -290,7 +292,7 @@ def test_issue40(*_):
     eq = [ps.Assignment(sp.Symbol('rho'), 1.0),
           ps.Assignment(src[0, 0](0), sp.Rational(4, 9) * sp.Symbol('rho'))]
 
-    config = ps.CreateKernelConfig(target=Target.CPU, cpu_vectorize_info=opt, data_type='float64')
+    config = pystencils.config.CreateKernelConfig(target=Target.CPU, cpu_vectorize_info=opt, data_type='float64')
     ast = ps.create_kernel(eq, config=config)
 
     code = ps.get_code_str(ast)
diff --git a/pystencils_tests/test_vectorization_specific.py b/pystencils_tests/test_vectorization_specific.py
index b13d8bc28..13bb412f0 100644
--- a/pystencils_tests/test_vectorization_specific.py
+++ b/pystencils_tests/test_vectorization_specific.py
@@ -1,6 +1,8 @@
 import pytest
 
 import numpy as np
+
+import pystencils.config
 import sympy as sp
 
 import pystencils as ps
@@ -28,7 +30,7 @@ def test_vectorisation_varying_arch(instruction_set):
         f1 @= 2 * s.tmp0
         f2 @= 2 * s.tmp0
 
-    config = ps.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
+    config = pystencils.config.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
     ast = ps.create_kernel(update_rule, config=config)
     kernel = ast.compile()
     kernel(f=arr)
@@ -47,7 +49,7 @@ def test_vectorized_abs(instruction_set, dtype):
     f, g = ps.fields(f=arr, g=arr)
     update_rule = [ps.Assignment(g.center(), sp.Abs(f.center()))]
 
-    config = ps.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
+    config = pystencils.config.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
     ast = ps.create_kernel(update_rule, config=config)
 
     func = ast.compile()
@@ -63,12 +65,12 @@ def test_strided(instruction_set, dtype):
     update_rule = [ps.Assignment(g[0, 0], f[0, 0] + f[-1, 0] + f[1, 0] + f[0, 1] + f[0, -1] + 42.0)]
     if 'storeS' not in get_vector_instruction_set(dtype, instruction_set) and not instruction_set in ['avx512', 'rvv'] and not instruction_set.startswith('sve'):
         with pytest.warns(UserWarning) as warn:
-            config = ps.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
+            config = pystencils.config.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
             ast = ps.create_kernel(update_rule, config=config)
             assert 'Could not vectorize loop' in warn[0].message.args[0]
     else:
         with pytest.warns(None) as warn:
-            config = ps.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
+            config = pystencils.config.CreateKernelConfig(cpu_vectorize_info={'instruction_set': instruction_set})
             ast = ps.create_kernel(update_rule, config=config)
             assert len(warn) == 0
     func = ast.compile()
@@ -99,7 +101,7 @@ def test_alignment_and_correct_ghost_layers(gl_field, gl_kernel, instruction_set
     update_rule = ps.Assignment(dst[0, 0], src[0, 0])
     opt = {'instruction_set': instruction_set, 'assume_aligned': True,
            'nontemporal': True, 'assume_inner_stride_one': True}
-    config = ps.CreateKernelConfig(target=dh.default_target, cpu_vectorize_info=opt, ghost_layers=gl_kernel)
+    config = pystencils.config.CreateKernelConfig(target=dh.default_target, cpu_vectorize_info=opt, ghost_layers=gl_kernel)
     ast = ps.create_kernel(update_rule, config=config)
     kernel = ast.compile()
     if gl_kernel != gl_field:
@@ -135,8 +137,8 @@ def test_vectorization_other(instruction_set, function):
 @pytest.mark.parametrize('instruction_set', supported_instruction_sets)
 @pytest.mark.parametrize('field_layout', ('fzyx', 'zyxf'))
 def test_square_root(dtype, instruction_set, field_layout):
-    config = ps.CreateKernelConfig(data_type=dtype,
-                                   cpu_vectorize_info={'instruction_set': instruction_set,
+    config = pystencils.config.CreateKernelConfig(data_type=dtype,
+                                                  cpu_vectorize_info={'instruction_set': instruction_set,
                                                        'assume_inner_stride_one': True,
                                                        'assume_aligned': False, 'nontemporal': False})
 
-- 
GitLab