iteration_slice with step size is ignored for GPU kernels
The step size of an iteration_slice is ignored for GPU kernels.
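For reference, make_slice only builds a tuple of ordinary Python slice objects, so the requested step of 2 is plainly visible in the slice itself:

import pystencils as ps
print(ps.make_slice[1:-1:2, 1:-1:2])  # (slice(1, -1, 2), slice(1, -1, 2))

Minimal reproducer: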
import pystencils as ps

dh = ps.create_data_handling(domain_size=(5, 5), periodicity=True, default_target="gpu")
iteration_slice = ps.make_slice[1:-1:2, 1:-1:2]  # every second interior cell
config = ps.CreateKernelConfig(target=dh.default_target, iteration_slice=iteration_slice)
field = dh.add_array("a")
assign = ps.Assignment(field.center, 1.0)  # write 1.0 to each selected cell
kernel = ps.create_kernel(assign, config=config).compile()

dh.fill(field.name, 0, ghost_layers=True)
if config.target == ps.enums.Target.GPU:
    dh.to_gpu(field.name)
dh.run_kernel(kernel)
if config.target == ps.enums.Target.GPU:
    dh.to_cpu(field.name)
print(dh.gather_array(field.name, ghost_layers=True))
On the GPU target this prints the following: every interior cell is set, i.e. the step of 2 was ignored.
[[0. 0. 0. 0. 0. 0. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 0. 0. 0. 0. 0. 0.]]
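For comparison, here is a minimal sketch of the output one would expect if the step were honored, using plain NumPy to apply the same slice to the 7x7 array (this assumes the iteration_slice indexes the full array including the ghost layer, which matches how the GPU run above covered indices 1 through 5):

import numpy as np
expected = np.zeros((7, 7))
expected[1:-1:2, 1:-1:2] = 1.0  # same slice, step honored
print(expected)

This sets only indices 1, 3, and 5 along each axis, whereas the GPU kernel fills the whole interior.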