diff --git a/datahandling/datahandling_interface.py b/datahandling/datahandling_interface.py
index 2a537206da3332d2f3145f0fb75f67d0284ab571..f2e3e1999229db97cbbbc0c8ecd9a27a6b30720d 100644
--- a/datahandling/datahandling_interface.py
+++ b/datahandling/datahandling_interface.py
@@ -128,24 +128,23 @@ class DataHandling(ABC):
     def toCpu(self, name):
         """Copies GPU data of array with specified name to CPU.
         Works only if 'cpu=True' and 'gpu=True' have been used in 'add' method"""
-        pass
 
     @abstractmethod
     def toGpu(self, name):
         """Copies GPU data of array with specified name to GPU.
         Works only if 'cpu=True' and 'gpu=True' has been used in 'add' method"""
-        pass
 
     @abstractmethod
     def allToCpu(self):
         """Copies data from GPU to CPU for all arrays that have a CPU and a GPU representation"""
-        pass
 
     @abstractmethod
     def allToGpu(self):
         """Copies data from CPU to GPU for all arrays that have a CPU and a GPU representation"""
-        pass
 
+    @abstractmethod
+    def isOnGpu(self, name):
+        """Checks if this data was also allocated on the GPU - does not check if this data item is in synced"""
 
     @abstractmethod
     def vtkWriter(self, fileName, dataNames, ghostLayers=False):
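A minimal usage sketch of the extended interface, assuming a concrete DataHandling instance `dh` that is not part of this patch: the new isOnGpu query lets callers guard a transfer instead of calling toGpu on a CPU-only field.

```python
# Hypothetical helper, not part of the patch: copy a field's CPU contents to the
# GPU only if the field was added with gpu=True, otherwise fail with a clear message.
def ensureOnGpu(dh, name):
    if dh.isOnGpu(name):        # new query introduced by this patch
        dh.toGpu(name)          # existing transfer method of the interface
    else:
        raise ValueError("Field '{}' has no GPU representation".format(name))
```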
diff --git a/datahandling/parallel_datahandling.py b/datahandling/parallel_datahandling.py
index 08000b8b2930a9f4c020cf7bf731e7842595cbfd..acbdea1b0053cd4015b7dc40c5e734282236bbcb 100644
--- a/datahandling/parallel_datahandling.py
+++ b/datahandling/parallel_datahandling.py
@@ -121,6 +121,7 @@ class ParallelDataHandling(DataHandling):
         self._fieldNameToCpuDataName[latexName] = name
         if gpu:
             self._fieldNameToGpuDataName[latexName] = self.GPU_DATA_PREFIX + name
+        return self.fields[name]
 
     def hasData(self, name):
         return name in self._fields
@@ -213,6 +214,9 @@ class ParallelDataHandling(DataHandling):
         else:
             wlb.cuda.copyFieldToGpu(self.blocks, self.GPU_DATA_PREFIX + name, name)
 
+    def isOnGpu(self, name):
+        return (name, self.GPU_DATA_PREFIX + name) in self._cpuGpuPairs
+
     def allToCpu(self):
         for cpuName, gpuName in self._cpuGpuPairs:
             wlb.cuda.copyFieldToCpu(self.blocks, gpuName, cpuName)
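A standalone illustration of the parenthesization in isOnGpu above: the comma binds more loosely than `in`, so the (cpuName, gpuName) pair must be built explicitly before the membership test. The 'gpu_' prefix below is illustrative, not necessarily the actual GPU_DATA_PREFIX.

```python
# Why the tuple is parenthesized before 'in': without the parentheses the
# expression evaluates to a 2-tuple (name, <bool>) instead of a single boolean.
pairs = [('vel', 'gpu_vel')]             # illustrative (cpuName, gpuName) pairs
name = 'vel'
print((name, 'gpu_' + name) in pairs)    # True  -> membership test on the pair
print((name, 'gpu_' + name in pairs))    # ('vel', False) -> accidental tuple
```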
diff --git a/datahandling/serial_datahandling.py b/datahandling/serial_datahandling.py
index d422e76a6b87c2af68271cc7fdd7f96a6c8f83e0..0752668b90a9fffdc6403f1b634a47e9d29d3630 100644
--- a/datahandling/serial_datahandling.py
+++ b/datahandling/serial_datahandling.py
@@ -106,6 +106,7 @@ class SerialDataHandling(DataHandling):
         self.fields[name] = Field.createFixedSize(latexName, shape=kwargs['shape'], indexDimensions=indexDimensions,
                                                   dtype=kwargs['dtype'], layout=layoutTuple)
         self._fieldLatexNameToDataName[latexName] = name
+        return self.fields[name]
 
     def addCustomData(self, name, cpuCreationFunction,
                       gpuCreationFunction=None, cpuToGpuTransferFunc=None, gpuToCpuTransferFunc=None):
@@ -209,6 +210,9 @@ class SerialDataHandling(DataHandling):
         else:
             self.gpuArrays[name].set(self.cpuArrays[name])
 
+    def isOnGpu(self, name):
+        return name in self.gpuArrays
+
     def synchronizationFunctionCPU(self, names, stencilName=None, **kwargs):
         return self._synchronizationFunctor(names, stencilName, 'cpu')
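Both add() implementations now return the created Field, so callers can capture the symbolic field directly instead of looking it up in dh.fields afterwards. A hedged usage sketch; the data handling instance dh is assumed to exist, and the argument list shows only the cpu/gpu flags documented in the interface:

```python
# Hypothetical usage: capture the Field returned by add() and query its GPU status.
vel = dh.add('velocity', cpu=True, gpu=True)   # add() now returns the created Field
assert vel is dh.fields['velocity']            # same object as stored in dh.fields
assert dh.isOnGpu('velocity')                  # allocated on GPU because gpu=True
```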