Commit 5ef6db6a authored by Markus Holzer

Add test case

parent c1c692a8
Related merge requests: !318 [Fix] GPU Buffer with iteration slices, !317 [FIX] Iteration slices with GPU kernels
This commit is part of merge request !318.
@@ -171,12 +171,13 @@ class TypeAdder:
             args_types = [self.figure_out_type(a) for a in expr.args]
             new_args = [a if t.dtype_eq(bool_type) else BooleanCastFunc(a, bool_type) for a, t in args_types]
             return expr.func(*new_args), bool_type
-        elif type(expr, ) in pystencils.integer_functions.__dict__.values():
+        elif type(expr, ) in pystencils.integer_functions.__dict__.values() or isinstance(expr, sp.Mod):
             args_types = [self.figure_out_type(a) for a in expr.args]
             collated_type = collate_types([t for _, t in args_types])
             # TODO: should we downcast to integer? If yes then which integer type?
             if not collated_type.is_int():
-                raise ValueError(f"Integer functions need to be used with integer types but {collated_type} was given")
+                raise ValueError(f"Integer functions or Modulo need to be used with integer types "
+                                 f"but {collated_type} was given")
             return expr, collated_type
         elif isinstance(expr, flag_cond):
             ...
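
In short, the change routes sp.Mod through the same branch of TypeAdder.figure_out_type as the pystencils integer functions, so a modulo on integer loop counters is collated to an integer type instead of being rejected. A minimal sketch of what this enables, not part of the commit (the field f and the assignment are purely illustrative):

import sympy as sp
import pystencils as ps
from pystencils.astnodes import LoopOverCoordinate, Conditional, Block, SympyAssignment

# Hypothetical field, only for illustration.
f = ps.fields("f: double[2D]")
ctr = LoopOverCoordinate.get_loop_counter_symbol(0)

# sp.Mod on the (integer) loop counter now type-checks inside the condition.
assignments = [Conditional(sp.Eq(sp.Mod(ctr, 2), 1), Block([SympyAssignment(f.center, 1.0)]))]
kernel = ps.create_kernel(assignments).compile()

The new test below exercises the same pattern through the data handling API, once via an explicit Conditional with sp.Mod and once via an iteration slice, on both CPU and GPU targets.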
import pytest
import sympy as sp

import pystencils as ps
from pystencils.astnodes import LoopOverCoordinate, Conditional, Block, SympyAssignment


@pytest.mark.parametrize('target', [ps.Target.CPU, ps.Target.GPU])
@pytest.mark.parametrize('iteration_slice', [False, True])
def test_mod(target, iteration_slice):
    if target == ps.Target.GPU:
        pytest.importorskip("pycuda")
    dh = ps.create_data_handling(domain_size=(5, 5), periodicity=True, default_target=target)

    loop_ctrs = [LoopOverCoordinate.get_loop_counter_symbol(i) for i in range(dh.dim)]
    cond = [sp.Eq(sp.Mod(loop_ctrs[i], 2), 1) for i in range(dh.dim)]

    field = dh.add_array("a", values_per_cell=1)
    eq_list = [SympyAssignment(field.center, 1.0)]

    if iteration_slice:
        # Select every second cell directly via an iteration slice.
        iteration_slice = ps.make_slice[1:-1:2, 1:-1:2]
        config = ps.CreateKernelConfig(target=dh.default_target, iteration_slice=iteration_slice)
        assign = eq_list
    else:
        # Equivalent selection via an explicit condition on the loop counters using sp.Mod.
        assign = [Conditional(sp.And(*cond), Block(eq_list))]
        config = ps.CreateKernelConfig(target=dh.default_target)

    kernel = ps.create_kernel(assign, config=config).compile()

    dh.fill(field.name, 0, ghost_layers=True)
    if config.target == ps.enums.Target.GPU:
        dh.to_gpu(field.name)
    dh.run_kernel(kernel)
    if config.target == ps.enums.Target.GPU:
        dh.to_cpu(field.name)

    result = dh.gather_array(field.name, ghost_layers=True)

    # Only cells with two odd coordinates may have been written.
    for x in range(result.shape[0]):
        for y in range(result.shape[1]):
            if x % 2 == 1 and y % 2 == 1:
                assert result[x, y] == 1.0
            else:
                assert result[x, y] == 0.0
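
Both parametrizations are expected to produce the same checkerboard pattern, which is why the final assertion loop is shared. Assuming a checkout of the pystencils repository with the usual test layout, the CPU and GPU variants can be selected with, for example, pytest -k test_mod; pycuda is only needed for the GPU cases and those are skipped otherwise.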