diff --git a/src/pystencils/__init__.py b/src/pystencils/__init__.py
index 51556f79927b634cd64e51b2ef5a8c6547ddb96e..a01bad607420edb18d3a7bf0d05ccecc71c348bb 100644
--- a/src/pystencils/__init__.py
+++ b/src/pystencils/__init__.py
@@ -4,8 +4,8 @@ from . import fd
 from . import stencil as stencil
 from .display_utils import get_code_obj, get_code_str, show_code, to_dot
 from .field import Field, FieldType, fields
-from .config import CreateKernelConfig
 from .cache import clear_cache
+from .config import CreateKernelConfig
 from .kernel_decorator import kernel, kernel_config
 from .kernelcreation import create_kernel
 from .slicing import make_slice
diff --git a/src/pystencils/backend/kernelcreation/freeze.py b/src/pystencils/backend/kernelcreation/freeze.py
index 94b660fd9135c5d59c00596425737cf5a793391a..ba4b7ac8299cf497376ebf6e4faf23a264af4f66 100644
--- a/src/pystencils/backend/kernelcreation/freeze.py
+++ b/src/pystencils/backend/kernelcreation/freeze.py
@@ -104,7 +104,6 @@ class FreezeExpressions(SympyToPymbolicMapper):
                     assert len(offsets) == 1  # must have been checked by the context
                     offsets = [offsets[0] + sparse_ispace.sparse_counter]
                 case FieldType.BUFFER:
-                    #   TODO: Test Cases
                     ispace = self._ctx.get_full_iteration_space()
                     compressed_ctr = ispace.compressed_counter()
                     assert len(offsets) == 1
diff --git a/src/pystencils/backend/kernelcreation/iteration_space.py b/src/pystencils/backend/kernelcreation/iteration_space.py
index 147a4749e9c07acf88674ba74e626f6b5a5e1763..dd91511607557c7d68ff1c6c853e70d076d267ed 100644
--- a/src/pystencils/backend/kernelcreation/iteration_space.py
+++ b/src/pystencils/backend/kernelcreation/iteration_space.py
@@ -155,7 +155,7 @@ class FullIterationSpace(IterationSpace):
         """Expression counting the actual number of items processed at the iteration defined by the counter tuple.
         
         Used primarily for indexing buffers."""
-        actual_iters = [self.actual_iterations()]
+        actual_iters = [self.actual_iterations(d) for d in range(self.dim)]
         compressed_counters = [(dim.counter - dim.start) / dim.step for dim in self.dimensions]
         compressed_idx = compressed_counters[0]
         for ctr, iters in zip(compressed_counters[1:], actual_iters[1:]):
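For context on the fix above: `actual_iterations` must be queried once per dimension; with only a single entry in `actual_iters`, the loop over `actual_iters[1:]` never ran and the compressed index degenerated to the first counter. A minimal plain-Python sketch of the intended row-major linearization (the helper name and scalar arguments are illustrative, not the actual API):

```python
def compressed_index(counters, starts, steps, iters):
    """Linearize per-dimension counters of a strided iteration space
    into a single zero-based buffer index (row-major)."""
    compressed = [(c - lo) // st for c, lo, st in zip(counters, starts, steps)]
    idx = compressed[0]
    for ctr, n in zip(compressed[1:], iters[1:]):
        idx = idx * n + ctr
    return idx

# A 2D space iterating x over range(1, 7, 2) and y over range(0, 3)
# has 3 x 3 compressed iterations; the point (x=5, y=2) maps to index 8.
assert compressed_index((5, 2), (1, 0), (2, 1), (3, 3)) == 8
```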
diff --git a/src/pystencils/backend/typed_expressions.py b/src/pystencils/backend/typed_expressions.py
index 62aaf1f50240ee494391b172c0dc05d88847c91a..a6f74fb824678192c219fbc02a4a09f3109fc555 100644
--- a/src/pystencils/backend/typed_expressions.py
+++ b/src/pystencils/backend/typed_expressions.py
@@ -119,7 +119,7 @@ class PsTypedConstant:
     def _fix(self, v: Any) -> PsTypedConstant:
         """In binary operations, checks for type equality and, if necessary, promotes the values
         ``0``, ``1`` and ``-1`` to `PsTypedConstant`."""
-        if not isinstance(v, PsTypedConstant) and v in (0, 1, -1):
+        if not isinstance(v, PsTypedConstant):
             return PsTypedConstant(v, self._dtype)
         elif v._dtype != self._dtype:
             raise PsTypeError(
@@ -131,7 +131,7 @@ class PsTypedConstant:
     def _rfix(self, v: Any) -> PsTypedConstant:
         """Same as `_fix`, but for use with the `r...` versions of the binary ops. Only changes the order of the
         types in the exception string."""
-        if not isinstance(v, PsTypedConstant) and v in (0, 1, -1):
+        if not isinstance(v, PsTypedConstant):
             return PsTypedConstant(v, self._dtype)
         elif v._dtype != self._dtype:
             raise PsTypeError(
@@ -141,6 +141,9 @@ class PsTypedConstant:
             return v
 
     def __add__(self, other: Any):
+        #   TODO: During freeze, expressions like `int + PsTypedConstant` can
+        #   occur. To cope with these, make the arithmetic operators of PsTypedConstant
+        #   purely symbolic? -> investigate
         if isinstance(other, pb.Expression):  # let pymbolic handle this case
             return NotImplemented
 
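A hedged sketch of the promotion semantics changed above: any plain Python number in a binary operation with a `PsTypedConstant` is now promoted to the constant's dtype (previously only `0`, `1` and `-1` were), while mismatched `PsTypedConstant` dtypes still raise `PsTypeError`. Module paths are taken from this diff and assumed importable:

```python
from pystencils.backend.typed_expressions import PsTypedConstant
from pystencils.backend.types import PsIeeeFloatType

c = PsTypedConstant(2.0, PsIeeeFloatType(64))

print(c + 3)   # 3 is promoted by _fix to a float64 PsTypedConstant
print(3 + c)   # the reflected variant goes through _rfix

# Mixing explicitly typed constants of different dtypes is still an error:
# c + PsTypedConstant(3.0, PsIeeeFloatType(32))  # raises PsTypeError
```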
diff --git a/src/pystencils/config.py b/src/pystencils/config.py
index fdaed66658ca8d0a30779339c24153b3f9e8bc90..3c6b40c66fad94e0778a0741003885667781af19 100644
--- a/src/pystencils/config.py
+++ b/src/pystencils/config.py
@@ -1,207 +1,105 @@
-from copy import copy
-from collections import defaultdict
-from dataclasses import dataclass, field
-from types import MappingProxyType
-from typing import Union, Tuple, List, Dict, Callable, Any, DefaultDict, Iterable
+from typing import Sequence
+from dataclasses import dataclass
 
-from pystencils import Target, Backend, Field
-from .sympyextensions.typed_sympy import BasicType
+from .enums import Target
+from .field import Field, FieldType
 
-import numpy as np
+from .backend.jit import JitBase
+from .backend.exceptions import PsOptionsError
+from .backend.types import PsIntegerType, PsNumericType, PsIeeeFloatType
 
-# TODO: There exists DTypeLike in NumPy which would be better than type for type hinting, to new at the moment
-# from numpy.typing import DTypeLike
+from .backend.kernelcreation.defaults import Sympy as SpDefaults
 
 
-# TODO: CreateKernelConfig is bloated think of more classes better usage, factory whatever ...
-# Proposition: CreateKernelConfigs Classes for different targets?
 @dataclass
 class CreateKernelConfig:
-    """
-    **Below all parameters for the CreateKernelConfig are explained**
-    """
+    """Options for create_kernel."""
+
     target: Target = Target.CPU
-    """
-    All targets are defined in :class:`pystencils.enums.Target`
-    """
-    backend: Backend = None
-    """
-    All backends are defined in :class:`pystencils.enums.Backend`
-    """
-    function_name: str = 'kernel'
-    """
-    Name of the generated function - only important if generated code is written out
-    """
-    data_type: Union[type, str, DefaultDict[str, BasicType], Dict[str, BasicType]] = np.float64
-    """
-    Data type used for all untyped symbols (i.e. non-fields), can also be a dict from symbol name to type.
-    If specified as a dict ideally a defaultdict is used to define a default value for symbols not listed in the
-    dict. If a plain dict is provided it will be transformed into a defaultdict internally. The default value 
-    will then be specified via type collation then.
-    """
-    default_number_float: Union[type, str, BasicType] = None
-    """
-    Data type used for all untyped floating point numbers (i.e. 0.5). By default the value of data_type is used.
-    If data_type is given as a defaultdict its default_factory is used.
-    """
-    default_number_int: Union[type, str, BasicType] = np.int64
-    """
-    Data type used for all untyped integer numbers (i.e. 1)
-    """
-    iteration_slice: Tuple = None
-    """
-    Rectangular subset to iterate over, if not specified the complete non-ghost layer part of the field is iterated over
-    """
-    ghost_layers: Union[bool, int, List[Tuple[int]]] = None
-    """
-    A single integer specifies the ghost layer count at all borders, can also be a sequence of
-    pairs ``[(x_lower_gl, x_upper_gl), .... ]``. These layers are excluded from the iteration.
-    If left to default, the number of ghost layers is determined automatically from the assignments.
-    """
-    cpu_openmp: Union[bool, int] = False
-    """
-    `True` or number of threads for OpenMP parallelization, `False` for no OpenMP. If set to `True`, the maximum number
-    of available threads will be chosen.
-    """
-    cpu_vectorize_info: Dict = None
-    """
-    A dictionary with keys, 'vector_instruction_set', 'assume_aligned' and 'nontemporal'
-    for documentation of these parameters see vectorize function. Example:
-    '{'instruction_set': 'avx512', 'assume_aligned': True, 'nontemporal':True}'
-    """
-    cpu_blocking: Tuple[int] = None
-    """
-    A tuple of block sizes or `None` if no blocking should be applied
-    """
-    omp_single_loop: bool = True
-    """
-    If OpenMP is active: whether multiple outer loops are permitted
-    """
-    base_pointer_specification: Union[List[Iterable[str]], List[Iterable[int]]] = None
-    """
-    Specification of how many and which intermediate pointers are created for a field access.
-    For example [ (0), (2,3,)]  creates on base pointer for coordinates 2 and 3 and writes the offset for coordinate
-    zero directly in the field access. These specifications are defined dependent on the loop ordering.
-    This function translates more readable version into the specification above.
+    """The code generation target.
     
-    For more information see: `pystencils.transformations.create_intermediate_base_pointer`
+    TODO: Enhance `Target` from enum to a larger target spec, e.g. including vectorization architecture, ...
     """
-    gpu_indexing: str = 'block'
-    """
-    Either 'block' or 'line' , or custom indexing class, see `pystencils.gpu.AbstractIndexing`
-    """
-    gpu_indexing_params: MappingProxyType = field(default_factory=lambda: MappingProxyType({}))
-    """
-    Dict with indexing parameters (constructor parameters of indexing class)
-    e.g. for 'block' one can specify '{'block_size': (20, 20, 10) }'.
-    """
-    # TODO Markus rework this docstring
-    default_assignment_simplifications: bool = False
-    """
-    If `True` default simplifications are first performed on the Assignments. If problems occur during the
-    simplification a warning will be thrown.
-    Furthermore, it is essential to know that this is a two-stage process. The first stage of the process acts
-    on the level of the `pystencils.AssignmentCollection`.  In this part,
-    `pystencil.simp.create_simplification_strategy` from pystencils.simplificationfactory will be used to
-    apply optimisations like insertion of constants to
-    remove pressure from the registers. Thus the first part of the optimisations can only be executed if
-    an `AssignmentCollection` is passed. The second part of the optimisation acts on the level of each Assignment
-    individually. In this stage, all optimisations from `sympy.codegen.rewriting.optims_c99` are applied
-    to each Assignment. Thus this stage can also be applied if a list of Assignments is passed.
-    """
-    cpu_prepend_optimizations: List[Callable] = field(default_factory=list)
-    """
-    List of extra optimizations to perform first on the AST.
-    """
-    use_auto_for_assignments: bool = False
-    """
-    If set to `True`, auto can be used in the generated code for data types. This makes the type system more robust.
-    """
-    index_fields: List[Field] = None
-    """
-    List of index fields, i.e. 1D fields with struct data type. If not `None`, `create_index_kernel`
-    instead of `create_domain_kernel` is used.
-    """
-    coordinate_names: Tuple[str, Any] = ('x', 'y', 'z')
-    """
-    Name of the coordinate fields in the struct data type.
-    """
-    allow_double_writes: bool = False
-    """
-    If True, don't check if every field is only written at a single location. This is required
-    for example for kernels that are compiled with loop step sizes > 1, that handle multiple
-    cells at once. Use with care!
-    """
-    skip_independence_check: bool = False
+
+    jit: JitBase | None = None
+    """Just-in-time compiler used to compile and load the kernel for invocation from the current Python environment.
+    
+    If left at `None`, a default just-in-time compiler will be inferred from the `target` parameter.
+    To explicitly disable JIT compilation, pass `pystencils.backend.jit.no_jit`.
     """
-    Don't check that loop iterations are independent. This is needed e.g. for
-    periodicity kernel, that access the field outside the iteration bounds. Use with care!
+
+    function_name: str = "kernel"
+    """Name of the generated function"""
+
+    ghost_layers: None | int | Sequence[int | tuple[int, int]] = None
+    """Specifies the number of ghost layers of the iteration region.
+    
+    Options:
+     - `None`: Required ghost layers are inferred from field accesses
+     - `int`:  A uniform number of ghost layers in each spatial coordinate is applied
+     - `Sequence[int | tuple[int, int]]`: Ghost layers are specified for each spatial coordinate.
+        In each coordinate, a single integer specifies the ghost layers at both the lower and upper iteration limit,
+        while a pair of integers specifies the lower and upper ghost layers separately.
+
+    When manually specifying ghost layers, it is the user's responsibility to avoid out-of-bounds memory accesses.
+    If `ghost_layers=None` is specified, the iteration region may instead be set using the `iteration_slice` option.
     """
 
-    class DataTypeFactory:
-        """Because of pickle, we need to have a nested class, instead of a lambda in __post_init__"""
-        def __init__(self, dt):
-            self.dt = dt
+    iteration_slice: None | tuple[slice, ...] = None
+    """Specifies the kernel's iteration slice.
+    
+    `iteration_slice` may only be set if `ghost_layers = None`.
+    If it is set, a slice must be specified for each spatial coordinate.
+    TODO: Specification of valid slices and their behaviour
+    """
 
-        def __call__(self):
-            return BasicType(self.dt)
+    index_field: Field | None = None
+    """Index field for a sparse kernel.
+    
+    If this option is set, a sparse kernel with the given field as index field will be generated.
+    """
 
-    def _check_type(self, dtype_to_check):
-        if isinstance(dtype_to_check, str) and (dtype_to_check == 'float' or dtype_to_check == 'int'):
-            self._typing_error()
+    """Data Types"""
 
-        if isinstance(dtype_to_check, type) and not hasattr(dtype_to_check, "dtype"):
-            # NumPy-types are also of type 'type'. However, they have more properties
-            self._typing_error()
+    index_dtype: PsIntegerType = SpDefaults.index_dtype
+    """Data type used for all index calculations."""
 
-    @staticmethod
-    def _typing_error():
-        raise ValueError("It is not possible to use python types (float, int) for datatypes because these "
-                         "types are ambiguous. For example float will map to double. "
-                         "Also the string version like 'float' is not allowed, e.g. use 'float64' instead")
+    default_dtype: PsNumericType = PsIeeeFloatType(64)
+    """Default numeric data type.
+    
+    This data type will be applied to all untyped symbols.
+    """
 
     def __post_init__(self):
-        # ----  Legacy parameters
-        if not isinstance(self.target, Target):
-            raise ValueError("target must be provided by the 'Target' enum")
-
-        # ---- Auto Backend
-        if not self.backend:
-            if self.target == Target.CPU:
-                self.backend = Backend.C
-            elif self.target == Target.GPU:
-                self.backend = Backend.CUDA
-            else:
-                raise NotImplementedError(f'Target {self.target} has no default backend')
-
-        if not isinstance(self.backend, Backend):
-            raise ValueError("backend must be provided by the 'Backend' enum")
-
-        # Normalise data types
-        for dtype in [self.data_type, self.default_number_float, self.default_number_int]:
-            self._check_type(dtype)
-
-        if not isinstance(self.data_type, dict):
-            dt = copy(self.data_type)  # The copy is necessary because BasicType has sympy shinanigans
-            self.data_type = defaultdict(self.DataTypeFactory(dt))
-
-        if isinstance(self.data_type, dict) and not isinstance(self.data_type, defaultdict):
-            for dtype in self.data_type.values():
-                self._check_type(dtype)
-
-            dt = collate_types([BasicType(dtype) for dtype in self.data_type.values()])
-            dtype_dict = self.data_type
-            self.data_type = defaultdict(self.DataTypeFactory(dt), dtype_dict)
-
-        assert isinstance(self.data_type, defaultdict), "At this point data_type must be a defaultdict!"
-        for dtype in self.data_type.values():
-            self._check_type(dtype)
-        self._check_type(self.data_type.default_factory())
-
-        if self.default_number_float is None:
-            self.default_number_float = self.data_type.default_factory()
-
-        if not isinstance(self.default_number_float, BasicType):
-            self.default_number_float = BasicType(self.default_number_float)
-        if not isinstance(self.default_number_int, BasicType):
-            self.default_number_int = BasicType(self.default_number_int)
+        #   Check iteration space argument consistency
+        if (
+            int(self.iteration_slice is not None)
+            + int(self.ghost_layers is not None)
+            + int(self.index_field is not None)
+            > 1
+        ):
+            raise PsOptionsError(
+                "Parameters `iteration_slice`, `ghost_layers` and 'index_field` are mutually exclusive; "
+                "at most one of them may be set."
+            )
+
+        #   Check index field
+        if (
+            self.index_field is not None
+            and self.index_field.field_type != FieldType.INDEXED
+        ):
+            raise PsOptionsError(
+                "Only fields with `field_type == FieldType.INDEXED` can be specified as `index_field`"
+            )
+
+        #   Infer JIT
+        if self.jit is None:
+            match self.target:
+                case Target.CPU:
+                    from .backend.jit import LegacyCpuJit
+
+                    self.jit = LegacyCpuJit()
+                case _:
+                    raise NotImplementedError(
+                        f"No default JIT compiler implemented yet for target {self.target}"
+                    )
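A short usage sketch of the rewritten options class, following the `__post_init__` checks above (`make_slice` is exported at package level, as seen in `__init__.py`):

```python
import pystencils as ps
from pystencils.config import CreateKernelConfig
from pystencils.backend.exceptions import PsOptionsError

# Defaults: CPU target, ghost layers inferred, LegacyCpuJit selected in __post_init__.
cfg = CreateKernelConfig()

# Ghost layers: uniform per coordinate, or (lower, upper) pairs.
cfg_gl = CreateKernelConfig(ghost_layers=[1, (0, 2)])

# iteration_slice, ghost_layers and index_field are mutually exclusive:
try:
    CreateKernelConfig(ghost_layers=1, iteration_slice=ps.make_slice[1:-1, 1:-1])
except PsOptionsError as exc:
    print(exc)
```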
diff --git a/src/pystencils/display_utils.py b/src/pystencils/display_utils.py
index bce5d493c92399ed628042ab35308031dbea8fb6..bc63a3336dd081202aa1b025f6f93893490072bb 100644
--- a/src/pystencils/display_utils.py
+++ b/src/pystencils/display_utils.py
@@ -3,6 +3,7 @@ from typing import Any, Dict, Optional, Union
 import sympy as sp
 
 from pystencils.enums import Backend
+from pystencils.backend.ast import PsKernelFunction
 from pystencils.kernel_wrapper import KernelWrapper
 
 
@@ -40,32 +41,29 @@ def highlight_cpp(code: str):
     return HTML(highlight(code, CppLexer(), HtmlFormatter()))
 
 
-def get_code_obj(ast: Union[KernelWrapper], custom_backend=None):
+def get_code_obj(ast: KernelWrapper | PsKernelFunction, custom_backend=None):
     """Returns an object to display generated code (C/C++ or CUDA)
 
     Can either be displayed as HTML in Jupyter notebooks or printed as normal string.
     """
-    from pystencils.backends.cbackend import generate_c
+    from pystencils.backend.emission import emit_code
 
     if isinstance(ast, KernelWrapper):
         ast = ast.ast
 
-    if ast.backend not in {Backend.C, Backend.CUDA}:
-        raise NotImplementedError(f'get_code_obj is not implemented for backend {ast.backend}')
-    dialect = ast.backend
-
     class CodeDisplay:
         def __init__(self, ast_input):
             self.ast = ast_input
 
         def _repr_html_(self):
-            return highlight_cpp(generate_c(self.ast, dialect=dialect, custom_backend=custom_backend)).__html__()
+            return highlight_cpp(emit_code(self.ast)).__html__()
 
         def __str__(self):
-            return generate_c(self.ast, dialect=dialect, custom_backend=custom_backend)
+            return emit_code(self.ast)
 
         def __repr__(self):
-            return generate_c(self.ast, dialect=dialect, custom_backend=custom_backend)
+            return emit_code(self.ast)
+
     return CodeDisplay(ast)
 
 
@@ -86,7 +84,7 @@ def _isnotebook():
         return False
 
 
-def show_code(ast: Union[KernelWrapper], custom_backend=None):
+def show_code(ast: KernelWrapper | PsKernelFunction, custom_backend=None):
     code = get_code_obj(ast, custom_backend)
 
     if _isnotebook():
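A hedged usage sketch for the updated display helpers, which now accept both `KernelWrapper` and raw `PsKernelFunction` ASTs (the field-spec syntax is that of mainline pystencils):

```python
import pystencils as ps
from pystencils.sympyextensions import Assignment

src, dst = ps.fields("src, dst: float64[2D]")
kernel_ast = ps.create_kernel([Assignment(dst.center(), 2 * src.center())])

ps.show_code(kernel_ast)                      # HTML in notebooks, plain text otherwise
code_str = str(ps.get_code_obj(kernel_ast))   # emitted C code as a string
```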
diff --git a/src/pystencils/kernelcreation.py b/src/pystencils/kernelcreation.py
index 954eb2e07aa35c7d0f7cd43af6aca52968f01def..852401aa43913e6ab70e8f2fe58a3b605e9578c3 100644
--- a/src/pystencils/kernelcreation.py
+++ b/src/pystencils/kernelcreation.py
@@ -1,15 +1,5 @@
-from typing import Sequence
-from dataclasses import dataclass
-
 from .enums import Target
-from .field import Field, FieldType
-
-from .backend.jit import JitBase
-from .backend.exceptions import PsOptionsError
-from .backend.types import PsIntegerType, PsNumericType, PsIeeeFloatType
-
-from .backend.kernelcreation.defaults import Sympy as SpDefaults
-
+from .config import CreateKernelConfig
 from .backend.ast import PsKernelFunction
 from .backend.kernelcreation import (
     KernelCreationContext,
@@ -24,105 +14,10 @@ from .backend.kernelcreation.iteration_space import (
 from .backend.kernelcreation.transformations import EraseAnonymousStructTypes
 
-from .enums import Target
-from .sympyextensions import AssignmentCollection
-
-
-@dataclass
-class CreateKernelConfig:
-    """Options for create_kernel."""
-
-    target: Target = Target.CPU
-    """The code generation target.
-    
-    TODO: Enhance `Target` from enum to a larger target spec, e.g. including vectorization architecture, ...
-    """
-
-    jit: JitBase | None = None
-    """Just-in-time compiler used to compile and load the kernel for invocation from the current Python environment.
-    
-    If left at `None`, a default just-in-time compiler will be inferred from the `target` parameter.
-    To explicitly disable JIT compilation, pass `nbackend.jit.no_jit`.
-    """
-
-    function_name: str = "kernel"
-    """Name of the generated function"""
-
-    ghost_layers: None | int | Sequence[int | tuple[int, int]] = None
-    """Specifies the number of ghost layers of the iteration region.
-    
-    Options:
-     - `None`: Required ghost layers are inferred from field accesses
-     - `int`:  A uniform number of ghost layers in each spatial coordinate is applied
-     - `Sequence[int, tuple[int, int]]`: Ghost layers are specified for each spatial coordinate.
-        In each coordinate, a single integer specifies the ghost layers at both the lower and upper iteration limit,
-        while a pair of integers specifies the lower and upper ghost layers separately.
-
-    When manually specifying ghost layers, it is the user's responsibility to avoid out-of-bounds memory accesses.
-    If `ghost_layers=None` is specified, the iteration region may otherwise be set using the `iteration_slice` option.
-    """
-
-    iteration_slice: None | tuple[slice, ...] = None
-    """Specifies the kernel's iteration slice.
-    
-    `iteration_slice` may only be set if `ghost_layers = None`.
-    If it is set, a slice must be specified for each spatial coordinate.
-    TODO: Specification of valid slices and their behaviour
-    """
-
-    index_field: Field | None = None
-    """Index field for a sparse kernel.
-    
-    If this option is set, a sparse kernel with the given field as index field will be generated.
-    """
-
-    """Data Types"""
-
-    index_dtype: PsIntegerType = SpDefaults.index_dtype
-    """Data type used for all index calculations."""
-
-    default_dtype: PsNumericType = PsIeeeFloatType(64)
-    """Default numeric data type.
-    
-    This data type will be applied to all untyped symbols.
-    """
-
-    def __post_init__(self):
-        #   Check iteration space argument consistency
-        if (
-            int(self.iteration_slice is not None)
-            + int(self.ghost_layers is not None)
-            + int(self.index_field is not None)
-            > 1
-        ):
-            raise PsOptionsError(
-                "Parameters `iteration_slice`, `ghost_layers` and 'index_field` are mutually exclusive; "
-                "at most one of them may be set."
-            )
-
-        #   Check index field
-        if (
-            self.index_field is not None
-            and self.index_field.field_type != FieldType.INDEXED
-        ):
-            raise PsOptionsError(
-                "Only fields with `field_type == FieldType.INDEXED` can be specified as `index_field`"
-            )
-
-        #   Infer JIT
-        if self.jit is None:
-            match self.target:
-                case Target.CPU:
-                    from .backend.jit import LegacyCpuJit
-
-                    self.jit = LegacyCpuJit()
-                case _:
-                    raise NotImplementedError(
-                        f"No default JIT compiler implemented yet for target {self.target}"
-                    )
+from .sympyextensions import AssignmentCollection, Assignment
 
 
 def create_kernel(
-    assignments: AssignmentCollection,
+    assignments: AssignmentCollection | list[Assignment],
     config: CreateKernelConfig = CreateKernelConfig(),
 ):
     """Create a kernel AST from an assignment collection."""
@@ -130,6 +26,9 @@ def create_kernel(
         default_dtype=config.default_dtype, index_dtype=config.index_dtype
     )
 
+    if not isinstance(assignments, AssignmentCollection):
+        assignments = AssignmentCollection(assignments)
+
     analysis = KernelAnalysis(ctx)
     analysis(assignments)
 
diff --git a/src/pystencils/old/config.py b/src/pystencils/old/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1b9b179ac89d385cf5d8f6b6b4c92a77e8e8915
--- /dev/null
+++ b/src/pystencils/old/config.py
@@ -0,0 +1,207 @@
+from copy import copy
+from collections import defaultdict
+from dataclasses import dataclass, field
+from types import MappingProxyType
+from typing import Union, Tuple, List, Dict, Callable, Any, DefaultDict, Iterable
+
+from pystencils import Target, Backend, Field
+from ..sympyextensions.typed_sympy import BasicType, collate_types  # collate_types is used in __post_init__ below; its home here is assumed
+
+import numpy as np
+
+# TODO: There exists DTypeLike in NumPy, which would be better than `type` for type hinting, but it is too new at the moment
+# from numpy.typing import DTypeLike
+
+
+# TODO: CreateKernelConfig is bloated; think of more classes, better usage, a factory, ...
+# Proposition: separate CreateKernelConfig classes for different targets?
+@dataclass
+class CreateKernelConfig:
+    """
+    **All parameters of CreateKernelConfig are explained below.**
+    """
+    target: Target = Target.CPU
+    """
+    All targets are defined in :class:`pystencils.enums.Target`
+    """
+    backend: Backend = None
+    """
+    All backends are defined in :class:`pystencils.enums.Backend`
+    """
+    function_name: str = 'kernel'
+    """
+    Name of the generated function - only important if generated code is written out
+    """
+    data_type: Union[type, str, DefaultDict[str, BasicType], Dict[str, BasicType]] = np.float64
+    """
+    Data type used for all untyped symbols (i.e. non-fields), can also be a dict from symbol name to type.
+    If specified as a dict, ideally a defaultdict is used to define a default value for symbols not listed in
+    the dict. If a plain dict is provided, it will be transformed into a defaultdict internally; the default
+    value is then determined via type collation.
+    """
+    default_number_float: Union[type, str, BasicType] = None
+    """
+    Data type used for all untyped floating point numbers (e.g. 0.5). By default, the value of data_type is used.
+    If data_type is given as a defaultdict its default_factory is used.
+    """
+    default_number_int: Union[type, str, BasicType] = np.int64
+    """
+    Data type used for all untyped integer numbers (e.g. 1)
+    """
+    iteration_slice: Tuple = None
+    """
+    Rectangular subset to iterate over. If not specified, the complete non-ghost-layer part of the field is iterated over.
+    """
+    ghost_layers: Union[bool, int, List[Tuple[int]]] = None
+    """
+    A single integer specifies the ghost layer count at all borders; alternatively, a sequence of
+    pairs ``[(x_lower_gl, x_upper_gl), ...]`` may be given. These layers are excluded from the iteration.
+    If left at the default, the number of ghost layers is determined automatically from the assignments.
+    """
+    cpu_openmp: Union[bool, int] = False
+    """
+    `True` or number of threads for OpenMP parallelization, `False` for no OpenMP. If set to `True`, the maximum number
+    of available threads will be chosen.
+    """
+    cpu_vectorize_info: Dict = None
+    """
+    A dictionary with the keys 'instruction_set', 'assume_aligned' and 'nontemporal';
+    for documentation of these parameters see the vectorize function. Example:
+    ``{'instruction_set': 'avx512', 'assume_aligned': True, 'nontemporal': True}``
+    """
+    cpu_blocking: Tuple[int] = None
+    """
+    A tuple of block sizes or `None` if no blocking should be applied
+    """
+    omp_single_loop: bool = True
+    """
+    If OpenMP is active: whether multiple outer loops are permitted
+    """
+    base_pointer_specification: Union[List[Iterable[str]], List[Iterable[int]]] = None
+    """
+    Specification of how many and which intermediate pointers are created for a field access.
+    For example ``[(0,), (2, 3)]`` creates one base pointer for coordinates 2 and 3 and writes the offset for
+    coordinate zero directly into the field access. These specifications depend on the loop ordering.
+    The more readable version given here is translated into the internal specification.
+    
+    For more information see: `pystencils.transformations.create_intermediate_base_pointer`
+    """
+    gpu_indexing: str = 'block'
+    """
+    Either 'block' or 'line', or a custom indexing class; see `pystencils.gpu.AbstractIndexing`
+    """
+    gpu_indexing_params: MappingProxyType = field(default_factory=lambda: MappingProxyType({}))
+    """
+    Dict with indexing parameters (constructor parameters of the indexing class),
+    e.g. for 'block' one can specify ``{'block_size': (20, 20, 10)}``.
+    """
+    # TODO Markus rework this docstring
+    default_assignment_simplifications: bool = False
+    """
+    If `True`, default simplifications are first performed on the assignments. If problems occur during the
+    simplification, a warning is emitted.
+    Note that this is a two-stage process. The first stage acts
+    on the level of the `pystencils.AssignmentCollection`: in this part,
+    `pystencils.simp.create_simplification_strategy` from the simplification factory is used to
+    apply optimisations such as insertion of constants to
+    remove pressure from the registers. Thus the first stage can only be executed if
+    an `AssignmentCollection` is passed. The second stage acts on the level of each assignment
+    individually: all optimisations from `sympy.codegen.rewriting.optims_c99` are applied
+    to each assignment. This stage can therefore also be applied if a list of assignments is passed.
+    """
+    cpu_prepend_optimizations: List[Callable] = field(default_factory=list)
+    """
+    List of extra optimizations to perform first on the AST.
+    """
+    use_auto_for_assignments: bool = False
+    """
+    If set to `True`, auto can be used in the generated code for data types. This makes the type system more robust.
+    """
+    index_fields: List[Field] = None
+    """
+    List of index fields, i.e. 1D fields with struct data type. If not `None`, `create_index_kernel`
+    instead of `create_domain_kernel` is used.
+    """
+    coordinate_names: Tuple[str, ...] = ('x', 'y', 'z')
+    """
+    Names of the coordinate fields in the struct data type.
+    """
+    allow_double_writes: bool = False
+    """
+    If `True`, don't check that every field is written at only a single location. This is required,
+    for example, for kernels that are compiled with loop step sizes > 1 and handle multiple
+    cells at once. Use with care!
+    """
+    skip_independence_check: bool = False
+    """
+    Don't check that loop iterations are independent. This is needed e.g. for
+    periodicity kernels, which access the field outside the iteration bounds. Use with care!
+    """
+
+    class DataTypeFactory:
+        """Because of pickle, we need to have a nested class, instead of a lambda in __post_init__"""
+        def __init__(self, dt):
+            self.dt = dt
+
+        def __call__(self):
+            return BasicType(self.dt)
+
+    def _check_type(self, dtype_to_check):
+        if isinstance(dtype_to_check, str) and (dtype_to_check == 'float' or dtype_to_check == 'int'):
+            self._typing_error()
+
+        if isinstance(dtype_to_check, type) and not hasattr(dtype_to_check, "dtype"):
+            # NumPy-types are also of type 'type'. However, they have more properties
+            self._typing_error()
+
+    @staticmethod
+    def _typing_error():
+        raise ValueError("It is not possible to use python types (float, int) for datatypes because these "
+                         "types are ambiguous. For example float will map to double. "
+                         "Also the string version like 'float' is not allowed, e.g. use 'float64' instead")
+
+    def __post_init__(self):
+        # ----  Legacy parameters
+        if not isinstance(self.target, Target):
+            raise ValueError("target must be provided by the 'Target' enum")
+
+        # ---- Auto Backend
+        if not self.backend:
+            if self.target == Target.CPU:
+                self.backend = Backend.C
+            elif self.target == Target.GPU:
+                self.backend = Backend.CUDA
+            else:
+                raise NotImplementedError(f'Target {self.target} has no default backend')
+
+        if not isinstance(self.backend, Backend):
+            raise ValueError("backend must be provided by the 'Backend' enum")
+
+        # Normalise data types
+        for dtype in [self.data_type, self.default_number_float, self.default_number_int]:
+            self._check_type(dtype)
+
+        if not isinstance(self.data_type, dict):
+            dt = copy(self.data_type)  # The copy is necessary because BasicType has sympy shenanigans
+            self.data_type = defaultdict(self.DataTypeFactory(dt))
+
+        if isinstance(self.data_type, dict) and not isinstance(self.data_type, defaultdict):
+            for dtype in self.data_type.values():
+                self._check_type(dtype)
+
+            dt = collate_types([BasicType(dtype) for dtype in self.data_type.values()])
+            dtype_dict = self.data_type
+            self.data_type = defaultdict(self.DataTypeFactory(dt), dtype_dict)
+
+        assert isinstance(self.data_type, defaultdict), "At this point data_type must be a defaultdict!"
+        for dtype in self.data_type.values():
+            self._check_type(dtype)
+        self._check_type(self.data_type.default_factory())
+
+        if self.default_number_float is None:
+            self.default_number_float = self.data_type.default_factory()
+
+        if not isinstance(self.default_number_float, BasicType):
+            self.default_number_float = BasicType(self.default_number_float)
+        if not isinstance(self.default_number_int, BasicType):
+            self.default_number_int = BasicType(self.default_number_int)
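For reference, a sketch of the legacy `data_type` normalisation preserved above: a plain dict becomes a defaultdict whose default factory yields the collation of all listed types (this assumes the `collate_types` import noted at the top of the file resolves):

```python
from pystencils.old.config import CreateKernelConfig

cfg = CreateKernelConfig(data_type={'f': 'float32', 'g': 'float64'})
print(cfg.data_type['f'])        # 'float32' -- listed entries are kept as given
print(cfg.data_type['missing'])  # BasicType from collation of the listed types
```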
diff --git a/src/pystencils/old/cpu/kernelcreation.py b/src/pystencils/old/cpu/kernelcreation.py
index 56262a6e48227ebb1c05291738f0a2d232f3e29a..eff8e0a40739bd414a6ffb5ba0bfd0aaeeb01ca4 100644
--- a/src/pystencils/old/cpu/kernelcreation.py
+++ b/src/pystencils/old/cpu/kernelcreation.py
@@ -1,7 +1,7 @@
 import sympy as sp
 
 import pystencils.sympyextensions.astnodes as ast
-from pystencils.config import CreateKernelConfig
+from pystencils.old.config import CreateKernelConfig
 from pystencils.enums import Target, Backend
 from pystencils.sympyextensions.astnodes import Block, KernelFunction, LoopOverCoordinate, SympyAssignment
 from pystencils.cpu.cpujit import make_python_function
diff --git a/src/pystencils/old/gpu/kernelcreation.py b/src/pystencils/old/gpu/kernelcreation.py
index 88c268809c65ccce1cb80e6c58f8ed304c40bbc4..89c7b3b10cb10e1700d36b5314e4ad883f067d8c 100644
--- a/src/pystencils/old/gpu/kernelcreation.py
+++ b/src/pystencils/old/gpu/kernelcreation.py
@@ -1,7 +1,7 @@
 import sympy as sp
 
 from pystencils.sympyextensions.astnodes import Block, KernelFunction, LoopOverCoordinate, SympyAssignment
-from pystencils.config import CreateKernelConfig
+from pystencils.old.config import CreateKernelConfig
 from pystencils.typing import StructType, TypedSymbol
 from pystencils.typing.transformations import add_types
 from pystencils.field import Field, FieldType
diff --git a/src/pystencils/old/kernelcreation.py b/src/pystencils/old/kernelcreation.py
index 12dddee1678a810632ad958693e60c4cc881c186..de064e8e697394ca2594d9e4c44636174782e28f 100644
--- a/src/pystencils/old/kernelcreation.py
+++ b/src/pystencils/old/kernelcreation.py
@@ -3,7 +3,7 @@ import warnings
 from typing import Union, List
 
 import sympy as sp
-from pystencils.config import CreateKernelConfig
+from pystencils.old.config import CreateKernelConfig
 
 from pystencils.sympyextensions.assignmentcollection.assignment import Assignment, AddAugmentedAssignment
 from pystencils.sympyextensions.astnodes import Node, Block, Conditional, LoopOverCoordinate, SympyAssignment
diff --git a/src/pystencils/old/typing/transformations.py b/src/pystencils/old/typing/transformations.py
index 2cd477e125bc6ceb19993344562087ac2afa4fd3..6047ada7093eae76eed7cac34c8942740a5c358e 100644
--- a/src/pystencils/old/typing/transformations.py
+++ b/src/pystencils/old/typing/transformations.py
@@ -1,7 +1,7 @@
 from typing import List
 
 from pystencils.sympyextensions.astnodes import Node
-from pystencils.config import CreateKernelConfig
+from pystencils.old.config import CreateKernelConfig
 from pystencils.typing.leaf_typing import TypeAdder
 
 
diff --git a/tests/test_buffer.py b/tests/test_buffer.py
index a21d68cc406c604b3566b46a0167a4ba356c59cc..32d2eade1ed154b49521d4669873599c7ffc13c8 100644
--- a/tests/test_buffer.py
+++ b/tests/test_buffer.py
@@ -1,5 +1,6 @@
 """Tests  (un)packing (from)to buffers."""
 
+import pytest
 import numpy as np
 
 import pystencils as ps
@@ -41,9 +43,7 @@ def test_full_scalar_field():
                                       field_type=FieldType.BUFFER, dtype=src_arr.dtype)
 
         pack_eqs = [Assignment(buffer.center(), src_field.center())]
-        config = ps.CreateKernelConfig(data_type={'src_field': src_arr.dtype, 'buffer': buffer.dtype})
-        pack_code = create_kernel(pack_eqs, config=config)
-        code = ps.get_code_str(pack_code)
+        pack_code = create_kernel(pack_eqs)
         ps.show_code(pack_code)
 
         pack_kernel = pack_code.compile()
@@ -51,8 +51,7 @@ def test_full_scalar_field():
 
         unpack_eqs = [Assignment(dst_field.center(), buffer.center())]
 
-        config = ps.CreateKernelConfig(data_type={'dst_field': dst_arr.dtype, 'buffer': buffer.dtype})
-        unpack_code = create_kernel(unpack_eqs, config=config)
+        unpack_code = create_kernel(unpack_eqs)
 
         unpack_kernel = unpack_code.compile()
         unpack_kernel(dst_field=dst_arr, buffer=buffer_arr)
@@ -77,8 +76,7 @@ def test_field_slice():
 
             pack_eqs = [Assignment(buffer.center(), src_field.center())]
 
-            config = ps.CreateKernelConfig(data_type={'src_field': src_arr.dtype, 'buffer': buffer.dtype})
-            pack_code = create_kernel(pack_eqs, config=config)
+            pack_code = create_kernel(pack_eqs)
 
             pack_kernel = pack_code.compile()
             pack_kernel(buffer=bufferArr, src_field=src_arr[pack_slice])
@@ -86,8 +84,7 @@ def test_field_slice():
             # Unpack into ghost layer of dst_field in N direction
             unpack_eqs = [Assignment(dst_field.center(), buffer.center())]
 
-            config = ps.CreateKernelConfig(data_type={'dst_field': dst_arr.dtype, 'buffer': buffer.dtype})
-            unpack_code = create_kernel(unpack_eqs, config=config)
+            unpack_code = create_kernel(unpack_eqs)
 
             unpack_kernel = unpack_code.compile()
             unpack_kernel(buffer=bufferArr, dst_field=dst_arr[unpack_slice])
@@ -102,7 +99,7 @@ def test_all_cell_values():
     for (src_arr, dst_arr, bufferArr) in fields:
         src_field = Field.create_from_numpy_array("src_field", src_arr, index_dimensions=1)
         dst_field = Field.create_from_numpy_array("dst_field", dst_arr, index_dimensions=1)
-        buffer = Field.create_generic("buffer", spatial_dimensions=1, index_dimensions=1,
+        buffer = Field.create_generic("buffer", spatial_dimensions=1, index_shape=(num_cell_values,),
                                       field_type=FieldType.BUFFER, dtype=src_arr.dtype)
 
         pack_eqs = []
@@ -112,8 +109,7 @@ def test_all_cell_values():
             eq = Assignment(buffer(idx), src_field(idx))
             pack_eqs.append(eq)
 
-        config = ps.CreateKernelConfig(data_type={'src_field': src_arr.dtype, 'buffer': buffer.dtype})
-        pack_code = create_kernel(pack_eqs, config=config)
+        pack_code = create_kernel(pack_eqs)
         pack_kernel = pack_code.compile()
         pack_kernel(buffer=bufferArr, src_field=src_arr)
 
@@ -123,8 +119,7 @@ def test_all_cell_values():
             eq = Assignment(dst_field(idx), buffer(idx))
             unpack_eqs.append(eq)
 
-        config = ps.CreateKernelConfig(data_type={'dst_field': dst_arr.dtype, 'buffer': buffer.dtype})
-        unpack_code = create_kernel(unpack_eqs, config=config)
+        unpack_code = create_kernel(unpack_eqs)
         unpack_kernel = unpack_code.compile()
         unpack_kernel(buffer=bufferArr, dst_field=dst_arr)
 
@@ -140,7 +135,7 @@ def test_subset_cell_values():
     for (src_arr, dst_arr, bufferArr) in fields:
         src_field = Field.create_from_numpy_array("src_field", src_arr, index_dimensions=1)
         dst_field = Field.create_from_numpy_array("dst_field", dst_arr, index_dimensions=1)
-        buffer = Field.create_generic("buffer", spatial_dimensions=1, index_dimensions=1,
+        buffer = Field.create_generic("buffer", spatial_dimensions=1, index_shape=(len(cell_indices),),
                                       field_type=FieldType.BUFFER, dtype=src_arr.dtype)
 
         pack_eqs = []
@@ -150,8 +145,7 @@ def test_subset_cell_values():
             eq = Assignment(buffer(buffer_idx), src_field(cell_idx))
             pack_eqs.append(eq)
 
-        config = ps.CreateKernelConfig(data_type={'src_field': src_arr.dtype, 'buffer': buffer.dtype})
-        pack_code = create_kernel(pack_eqs, config=config)
+        pack_code = create_kernel(pack_eqs)
         pack_kernel = pack_code.compile()
         pack_kernel(buffer=bufferArr, src_field=src_arr)
 
@@ -161,8 +155,7 @@ def test_subset_cell_values():
             eq = Assignment(dst_field(cell_idx), buffer(buffer_idx))
             unpack_eqs.append(eq)
 
-        config = ps.CreateKernelConfig(data_type={'dst_field': dst_arr.dtype, 'buffer': buffer.dtype})
-        unpack_code = create_kernel(unpack_eqs, config=config)
+        unpack_code = create_kernel(unpack_eqs)
         unpack_kernel = unpack_code.compile()
         unpack_kernel(buffer=bufferArr, dst_field=dst_arr)
 
@@ -177,7 +170,7 @@ def test_field_layouts():
         for (src_arr, dst_arr, bufferArr) in fields:
             src_field = Field.create_from_numpy_array("src_field", src_arr, index_dimensions=1)
             dst_field = Field.create_from_numpy_array("dst_field", dst_arr, index_dimensions=1)
-            buffer = Field.create_generic("buffer", spatial_dimensions=1, index_dimensions=1,
+            buffer = Field.create_generic("buffer", spatial_dimensions=1, index_shape=(num_cell_values,),
                                           field_type=FieldType.BUFFER, dtype=src_arr.dtype)
 
             pack_eqs = []
@@ -187,8 +180,7 @@ def test_field_layouts():
                 eq = Assignment(buffer(idx), src_field(idx))
                 pack_eqs.append(eq)
 
-            config = ps.CreateKernelConfig(data_type={'src_field': src_arr.dtype, 'buffer': buffer.dtype})
-            pack_code = create_kernel(pack_eqs, config=config)
+            pack_code = create_kernel(pack_eqs)
             pack_kernel = pack_code.compile()
             pack_kernel(buffer=bufferArr, src_field=src_arr)
 
@@ -198,12 +190,11 @@ def test_field_layouts():
                 eq = Assignment(dst_field(idx), buffer(idx))
                 unpack_eqs.append(eq)
 
-            config = ps.CreateKernelConfig(data_type={'dst_field': dst_arr.dtype, 'buffer': buffer.dtype})
-            unpack_code = create_kernel(unpack_eqs, config=config)
+            unpack_code = create_kernel(unpack_eqs)
             unpack_kernel = unpack_code.compile()
             unpack_kernel(buffer=bufferArr, dst_field=dst_arr)
 
-
+@pytest.mark.xfail(reason="iteration slices not implemented yet")
 def test_iteration_slices():
     num_cell_values = 19
     dt = np.uint64
@@ -233,8 +224,7 @@ def test_iteration_slices():
         src_arr[(slice(None, None, 1),) * dim] = np.arange(num_cell_values)
         dst_arr.fill(0)
 
-        config = ps.CreateKernelConfig(iteration_slice=pack_slice,
-                                       data_type={'src_field': src_arr.dtype, 'buffer': buffer.dtype})
+        config = ps.CreateKernelConfig(iteration_slice=pack_slice)
 
         pack_code = create_kernel(pack_eqs, config=config)
         pack_kernel = pack_code.compile()
@@ -246,8 +236,7 @@ def test_iteration_slices():
             eq = Assignment(dst_field(idx), buffer(idx))
             unpack_eqs.append(eq)
 
-        config = ps.CreateKernelConfig(iteration_slice=pack_slice,
-                                       data_type={'dst_field': dst_arr.dtype, 'buffer': buffer.dtype})
+        config = ps.CreateKernelConfig(iteration_slice=pack_slice)
 
         unpack_code = create_kernel(unpack_eqs, config=config)
         unpack_kernel = unpack_code.compile()
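The recurring change in these tests: BUFFER fields now declare an explicit `index_shape` instead of a bare `index_dimensions`, so code generation knows how many values are packed per cell. A standalone sketch:

```python
import numpy as np
from pystencils import Field, FieldType

num_cell_values = 19
buffer = Field.create_generic(
    "buffer", spatial_dimensions=1,
    index_shape=(num_cell_values,),   # was: index_dimensions=1
    field_type=FieldType.BUFFER, dtype=np.float64,
)
```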