From e462d7f46d02bece7d6c35783c6e24dcdb247209 Mon Sep 17 00:00:00 2001
From: markus holzer <markus.holzer@fau.de>
Date: Sat, 8 Aug 2020 23:05:58 +0200
Subject: [PATCH] Extended datahandling test cases

---
 .../datahandling/datahandling_interface.py    | 11 +++
 .../datahandling/serial_datahandling.py       |  4 +-
 pystencils_tests/test_datahandling.py         | 67 +++++++++++++++++++
 3 files changed, 80 insertions(+), 2 deletions(-)

diff --git a/pystencils/datahandling/datahandling_interface.py b/pystencils/datahandling/datahandling_interface.py
index 272f1660c..0eb101815 100644
--- a/pystencils/datahandling/datahandling_interface.py
+++ b/pystencils/datahandling/datahandling_interface.py
@@ -86,6 +86,13 @@ class DataHandling(ABC):
         Args:
             description (str): String description of the fields to add
             dtype: data type of the array as numpy data type
+            ghost_layers: number of ghost layers - if not specified, the default value passed to the
+                          constructor is used
+            layout: memory layout of array, either structure of arrays 'SoA' or array of structures 'AoS'.
+                    This is only relevant if values_per_cell > 1
+            cpu: allocate field on the CPU
+            gpu: allocate field on the GPU, if None, a GPU field is allocated if default_target is 'gpu'
+            alignment: either False for no alignment, or the number of bytes to align to
         Returns:
             Fields representing the just created arrays
         """
@@ -200,6 +207,10 @@ class DataHandling(ABC):
         directly passed to the kernel function and override possible parameters from the DataHandling
         """
 
+    @abstractmethod
+    def get_kernel_kwargs(self, kernel_function, **kwargs):
+        """Returns the keyword arguments needed to call the given compiled kernel function"""
+
     @abstractmethod
     def swap(self, name1, name2, gpu=False):
         """Swaps data of two arrays"""
diff --git a/pystencils/datahandling/serial_datahandling.py b/pystencils/datahandling/serial_datahandling.py
index 7352951d2..25a4d23ee 100644
--- a/pystencils/datahandling/serial_datahandling.py
+++ b/pystencils/datahandling/serial_datahandling.py
@@ -266,10 +266,10 @@ class SerialDataHandling(DataHandling):
         return name in self.gpu_arrays
 
     def synchronization_function_cpu(self, names, stencil_name=None, **_):
-        return self.synchronization_function(names, stencil_name, 'cpu')
+        return self.synchronization_function(names, stencil_name, target='cpu')
 
     def synchronization_function_gpu(self, names, stencil_name=None, **_):
-        return self.synchronization_function(names, stencil_name, 'gpu')
+        return self.synchronization_function(names, stencil_name, target='gpu')
 
     def synchronization_function(self, names, stencil=None, target=None, **_):
         if target is None:
diff --git a/pystencils_tests/test_datahandling.py b/pystencils_tests/test_datahandling.py
index 7f95fe1f0..4d6dd72a4 100644
--- a/pystencils_tests/test_datahandling.py
+++ b/pystencils_tests/test_datahandling.py
@@ -111,6 +111,14 @@ def kernel_execution_jacobi(dh, target):
     test_gpu = target == 'gpu' or target == 'opencl'
     dh.add_array('f', gpu=test_gpu)
     dh.add_array('tmp', gpu=test_gpu)
+
+    if test_gpu:
+        assert dh.is_on_gpu('f')
+        assert dh.is_on_gpu('tmp')
+
+    with pytest.raises(ValueError):
+        dh.add_array('f', gpu=test_gpu)
+
     stencil_2d = [(1, 0), (-1, 0), (0, 1), (0, -1)]
     stencil_3d = [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]
     stencil = stencil_2d if dh.dim == 2 else stencil_3d
@@ -197,6 +205,7 @@ def test_access_and_gather():
 def test_kernel():
     for domain_shape in [(4, 5), (3, 4, 5)]:
         dh = create_data_handling(domain_size=domain_shape, periodicity=True)
+        assert all(dh.periodicity)
         kernel_execution_jacobi(dh, 'cpu')
         reduction(dh)
 
@@ -243,3 +252,61 @@ def test_add_arrays():
     assert y_ == y
     assert x == dh.fields['x']
     assert y == dh.fields['y']
+
+
+def test_get_kwarg():
+    domain_shape = (10, 10)
+    field_description = 'src, dst'
+
+    dh = create_data_handling(domain_size=domain_shape, default_ghost_layers=1)
+    src, dst = dh.add_arrays(field_description)
+    dh.fill("src", 1.0, ghost_layers=True)
+    dh.fill("dst", 0.0, ghost_layers=True)
+
+    ur = ps.Assignment(src.center, dst.center)
+    kernel = ps.create_kernel(ur).compile()
+
+    kw = dh.get_kernel_kwargs(kernel)
+    assert np.all(kw[0]['src'] == dh.cpu_arrays['src'])
+    assert np.all(kw[0]['dst'] == dh.cpu_arrays['dst'])
+
+
+def test_add_custom_data():
+    pytest.importorskip('pycuda')
+
+    import pycuda.gpuarray as gpuarray
+    import pycuda.autoinit  # noqa
+
+    def cpu_data_create_func():
+        return np.ones((2, 2), dtype=np.float64)
+
+    def gpu_data_create_func():
+        return gpuarray.zeros((2, 2), dtype=np.float64)
+
+    def cpu_to_gpu_transfer_func(gpuarr, cpuarray):
+        gpuarr.set(cpuarray)
+
+    def gpu_to_cpu_transfer_func(gpuarr, cpuarray):
+        gpuarr.get(cpuarray)
+
+    dh = create_data_handling(domain_size=(10, 10))
+    dh.add_custom_data('custom_data',
+                       cpu_data_create_func,
+                       gpu_data_create_func,
+                       cpu_to_gpu_transfer_func,
+                       gpu_to_cpu_transfer_func)
+
+    assert np.all(dh.custom_data_cpu['custom_data'] == 1)
+    assert np.all(dh.custom_data_gpu['custom_data'].get() == 0)
+
+    dh.to_cpu(name='custom_data')
+    dh.to_gpu(name='custom_data')
+
+    assert 'custom_data' in dh.custom_data_names
+
+
+def test_log():
+    dh = create_data_handling(domain_size=(10, 10))
+    dh.log_on_root()
+    assert dh.is_root
+    assert dh.world_rank == 0
-- 
GitLab