From 26d174f054fbb1339bf62f8566760777195da8e4 Mon Sep 17 00:00:00 2001
From: markus <markus.holzer@fau.de>
Date: Fri, 10 Jul 2020 09:45:15 +0200
Subject: [PATCH] Removed OpenCL parallel Datahandling test case

---
 lbmpy_tests/test_lbstep.py | 39 +++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/lbmpy_tests/test_lbstep.py b/lbmpy_tests/test_lbstep.py
index eabbf262..02e3c44a 100644
--- a/lbmpy_tests/test_lbstep.py
+++ b/lbmpy_tests/test_lbstep.py
@@ -54,24 +54,29 @@ def test_data_handling_2d_opencl():
     pystencils.opencl.opencljit.init_globally()
     print("--- LDC 2D test ---")
     results = []
-    for parallel in [True, False] if parallel_available else [False]:
-        for gpu in [True, False] if gpu_available else [False]:
-            if parallel and gpu and not hasattr(wLB, 'cuda'):
-                continue
 
-            print("Testing parallel: %s\tgpu: %s" % (parallel, gpu))
-            opt_params = {'target': 'opencl' if gpu else 'cpu',
-                          'gpu_indexing_params': {'block_size': (8, 4, 2)}}
-            if parallel:
-                from pystencils.datahandling import ParallelDataHandling
-                blocks = wLB.createUniformBlockGrid(blocks=(2, 3, 1), cellsPerBlock=(5, 5, 1),
-                                                    oneBlockPerProcess=False)
-                dh = ParallelDataHandling(blocks, dim=2)
-                rho = ldc_setup(data_handling=dh, optimization=opt_params)
-                results.append(rho)
-            else:
-                rho = ldc_setup(domain_size=(10, 15), parallel=False, optimization=opt_params)
-                results.append(rho)
+    # Since waLBerla has no OpenCL backend yet, it is not possible to use the
+    # parallel Datahandling with OpenCL at the moment.
+
+    # TODO: Activate parallel Datahandling once an OpenCL backend is available
+    parallel = False
+    for gpu in [True, False] if gpu_available else [False]:
+        if parallel and gpu and not hasattr(wLB, 'cuda'):
+            continue
+
+        print("Testing parallel: %s\tgpu: %s" % (parallel, gpu))
+        opt_params = {'target': 'opencl' if gpu else 'cpu',
+                      'gpu_indexing_params': {'block_size': (8, 4, 2)}}
+        if parallel:
+            from pystencils.datahandling import ParallelDataHandling
+            blocks = wLB.createUniformBlockGrid(blocks=(2, 3, 1), cellsPerBlock=(5, 5, 1),
+                                                oneBlockPerProcess=False)
+            dh = ParallelDataHandling(blocks, dim=2)
+            rho = ldc_setup(data_handling=dh, optimization=opt_params)
+            results.append(rho)
+        else:
+            rho = ldc_setup(domain_size=(10, 15), parallel=False, optimization=opt_params)
+            results.append(rho)
     for i, arr in enumerate(results[1:]):
         print("Testing equivalence version 0 with version %d" % (i + 1,))
         np.testing.assert_almost_equal(results[0], arr)
-- 
GitLab