diff --git a/src/pystencils/__init__.py b/src/pystencils/__init__.py
index 27d3df76ce8740ad7b0ca0885ba95de23c32127f..56cf6c2bb94971d0915603bffbf8886cf9b187c9 100644
--- a/src/pystencils/__init__.py
+++ b/src/pystencils/__init__.py
@@ -1,6 +1,6 @@
 """Module to generate stencil kernels in C or CUDA using sympy expressions and call them as Python functions"""
 
-from .enums import Backend, Target
+from .enums import Target
 from .defaults import DEFAULTS
 from . import fd
 from . import stencil as stencil
@@ -39,7 +39,6 @@ __all__ = [
     "create_kernel",
     "KernelFunction",
     "Target",
-    "Backend",
     "show_code",
     "to_dot",
     "get_code_obj",
diff --git a/src/pystencils/config.py b/src/pystencils/config.py
index 300123a75642037e278dd7b12343c435729d35e0..d63bd9336373d15e863dc2421a18a010782948f5 100644
--- a/src/pystencils/config.py
+++ b/src/pystencils/config.py
@@ -15,11 +15,8 @@ from .defaults import DEFAULTS
 class CreateKernelConfig:
     """Options for create_kernel."""
 
-    target: Target = Target.CPU
-    """The code generation target.
-    
-    TODO: Enhance `Target` from enum to a larger target spec, e.g. including vectorization architecture, ...
-    """
+    target: Target = Target.GenericCPU
+    """The code generation target."""
 
     jit: JitBase | None = None
     """Just-in-time compiler used to compile and load the kernel for invocation from the current Python environment.
@@ -94,12 +91,10 @@ class CreateKernelConfig:
 
         #   Infer JIT
         if self.jit is None:
-            match self.target:
-                case Target.CPU:
-                    from .backend.jit import LegacyCpuJit
-
-                    self.jit = LegacyCpuJit()
-                case _:
-                    raise NotImplementedError(
-                        f"No default JIT compiler implemented yet for target {self.target}"
-                    )
+            if self.target.is_cpu():
+                from .backend.jit import LegacyCpuJit
+                self.jit = LegacyCpuJit()
+            else:
+                raise NotImplementedError(
+                    f"No default JIT compiler implemented yet for target {self.target}"
+                )
diff --git a/src/pystencils/enums.py b/src/pystencils/enums.py
index a52a049a9101b20dd8f1f347f5cd2d72b09a8016..632c753f4cfa1f61f9a482c6724f7d03fe10e91d 100644
--- a/src/pystencils/enums.py
+++ b/src/pystencils/enums.py
@@ -1,30 +1,93 @@
-from enum import Enum, auto
+from enum import Flag, auto
 
 
-class Target(Enum):
+class Target(Flag):
     """
     The Target enumeration represents all possible targets that can be used for the code generation.
     """
-    CPU = auto()
-    """
-    Target CPU architecture.
+
+    #   ------------------ Component Flags - Do Not Use Directly! -------------------------------------------
+
+    _CPU = auto()
+
+    _VECTOR = auto()
+
+    _X86 = auto()
+    _SSE = auto()
+    _AVX = auto()
+    _AVX512 = auto()
+
+    _ARM = auto()
+    _NEON = auto()
+    _SVE = auto()
+
+    _GPU = auto()
+
+    _CUDA = auto()
+
+    _AUTOMATIC = auto()
+
+    #   ------------------ Actual Targets -------------------------------------------------------------------
+
+    CurrentCPU = _CPU | _AUTOMATIC
     """
-    GPU = auto()
+    Auto-best CPU target.
+
+    `CurrentCPU` causes the code generator to automatically select a CPU target according to CPUs found
+    on the current machine and runtime environment.
     """
-    Target GPU architecture.
+
+    GenericCPU = _CPU
+    """Generic CPU target.
+
+    Generate the kernel for a generic multicore CPU architecture. This opens up all architecture-independent
+    optimizations including OpenMP, but no vectorization.
     """
 
+    CPU = GenericCPU
+    """Alias for backward compatibility."""
 
-class Backend(Enum):
-    """
-    The Backend enumeration represents all possible backends that can be used for the code generation.
-    Backends and targets must be combined with care. For example CPU as a target and CUDA as a backend makes no sense.
-    """
-    C = auto()
+    X86_SSE = _CPU | _VECTOR | _X86 | _SSE
+    """x86 architecture with SSE vector extensions."""
+
+    X86_AVX = _CPU | _VECTOR | _X86 | _AVX
+    """x86 architecture with AVX vector extensions."""
+
+    X86_AVX512 = _CPU | _VECTOR | _X86 | _AVX512
+    """x86 architecture with AVX512 vector extensions."""
+
+    ARM_NEON = _CPU | _VECTOR | _ARM | _NEON
+    """ARM architecture with NEON vector extensions"""
+
+    ARM_SVE = _CPU | _VECTOR | _ARM | _SVE
+    """ARM architecture with SVE vector extensions"""
+
+    CurrentGPU = _GPU | _AUTOMATIC
     """
-    Use the C Backend of pystencils.
+    Auto-best GPU target.
+
+    `CurrentGPU` causes the code generator to automatically select a GPU target according to GPU devices
+    found on the current machine and runtime environment.
     """
-    CUDA = auto()
+
+    GenericCUDA = _GPU | _CUDA
     """
-    Use the CUDA backend to generate code for NVIDIA GPUs.
+    Generic CUDA GPU target.
+
+    Generate a CUDA kernel for a generic NVIDIA GPU.
     """
+
+    GPU = GenericCUDA
+    """Alias for backward compatibility."""
+
+    def is_automatic(self) -> bool:
+        return Target._AUTOMATIC in self
+
+    def is_cpu(self) -> bool:
+        return Target._CPU in self
+
+    def is_vector_cpu(self) -> bool:
+        return self.is_cpu() and Target._VECTOR in self
+
+    def is_gpu(self) -> bool:
+        return Target._GPU in self
diff --git a/src/pystencils/kernelcreation.py b/src/pystencils/kernelcreation.py
index 595d413c9f4f1ac97954c703971ac65946220333..439eb787225e4b83357a7a7df68afa13e5dc4d28 100644
--- a/src/pystencils/kernelcreation.py
+++ b/src/pystencils/kernelcreation.py
@@ -72,10 +72,8 @@ def create_kernel(
     kernel_body = typify(kernel_body)
 
     match config.target:
-        case Target.CPU:
+        case Target.GenericCPU:
             from .backend.platforms import GenericCpu
-
-            #   TODO: CPU platform should incorporate instruction set info, OpenMP, etc.
             platform = GenericCpu(ctx)
         case _:
             #   TODO: CUDA/HIP platform