Skip to content
Snippets Groups Projects
Commit f151c637 authored by Martin Bauer's avatar Martin Bauer
Browse files

Bugfixes & Improvements for MPI parallel runs

- fix for field collection and reduction operations
- support for aligned fields in parallel data handling
parent 93ebe096
No related merge requests found
......@@ -282,7 +282,7 @@ class DataHandling(ABC):
for b in self.iterate(slice_obj, ghost_layers=ghost_layers, inner_ghost_layers=inner_ghost_layers):
m = np.min(b[array_name])
result = m if result is None else np.min(result, m)
return self.reduce_float_sequence([result], 'min')[0] if reduce else result
return self.reduce_float_sequence([result], 'min', all_reduce=True)[0] if reduce else result
def max(self, array_name, slice_obj=None, ghost_layers=False, inner_ghost_layers=False, reduce=True):
    """Returns the maximum value inside the domain or slice of the domain.

    Args:
        array_name: name of the array to take the maximum of
        slice_obj: part of the domain to consider; None means the full domain
        ghost_layers: whether to include the domain ghost layers
        inner_ghost_layers: whether to include ghost layers between blocks
        reduce: if True, the local maxima are MPI all-reduced so that every
                process receives the global maximum; if False, only the
                process-local maximum is returned

    Returns:
        scalar maximum value (global if reduce=True, otherwise local;
        None if no block was iterated)
    """
    result = None
    for b in self.iterate(slice_obj, ghost_layers=ghost_layers, inner_ghost_layers=inner_ghost_layers):
        m = np.max(b[array_name])
        # BUG fixed: the original used np.max(result, m), which passes m as
        # the 'axis' argument of np.max instead of comparing the two values.
        # The builtin max compares the two scalars as intended.
        result = m if result is None else max(result, m)
    # all_reduce=True so every rank gets the global maximum, matching the
    # commit's fix for the collective reduction operations
    return self.reduce_float_sequence([result], 'max', all_reduce=True)[0] if reduce else result
def __str__(self):
result = ""
......
......@@ -100,13 +100,14 @@ class ParallelDataHandling(DataHandling):
if name in self.blocks[0] or self.GPU_DATA_PREFIX + name in self.blocks[0]:
raise ValueError("Data with this name has already been added")
if alignment:
raise NotImplementedError("Aligned field allocated not yet supported in parallel data handling")
if alignment is False or alignment is None:
alignment = 0
self._fieldInformation[name] = {'ghost_layers': ghost_layers,
'values_per_cell': values_per_cell,
'layout': layout,
'dtype': dtype}
'dtype': dtype,
'alignment': alignment}
layout_map = {'fzyx': wlb.field.Layout.fzyx, 'zyxf': wlb.field.Layout.zyxf,
'f': wlb.field.Layout.fzyx,
......@@ -114,8 +115,10 @@ class ParallelDataHandling(DataHandling):
if cpu:
wlb.field.addToStorage(self.blocks, name, dtype, fSize=values_per_cell, layout=layout_map[layout],
ghostLayers=ghost_layers)
ghostLayers=ghost_layers, alignment=alignment)
if gpu:
if alignment != 0:
raise ValueError("Alignment for walberla GPU fields not yet supported")
wlb.cuda.addGpuFieldToStorage(self.blocks, self.GPU_DATA_PREFIX + name, dtype, fSize=values_per_cell,
usePitchedMem=False, ghostLayers=ghost_layers, layout=layout_map[layout])
......@@ -305,13 +308,15 @@ class ParallelDataHandling(DataHandling):
if all_reduce:
return np.array(wlb.mpi.allreduceReal(sequence, self._reduce_map[operation.lower()]))
else:
return np.array(wlb.mpi.reduceReal(sequence, self._reduce_map[operation.lower()]))
result = np.array(wlb.mpi.reduceReal(sequence, self._reduce_map[operation.lower()], 0))
return result if wlb.mpi.worldRank() == 0 else None
def reduce_int_sequence(self, sequence, operation, all_reduce=False):
    """MPI-reduces a sequence of integers element-wise over all processes.

    Args:
        sequence: sequence of ints to reduce
        operation: name of the reduction operation; looked up case-insensitively
                   in self._reduce_map
        all_reduce: if True, an allreduce is performed and every process
                    receives the result; if False, the result is reduced to
                    the root process (rank 0) only

    Returns:
        np.array with the reduced values — on every process for
        all_reduce=True, on rank 0 otherwise; non-root processes receive
        None when all_reduce is False, so callers cannot mistake an
        undefined buffer for a valid result
    """
    if all_reduce:
        return np.array(wlb.mpi.allreduceInt(sequence, self._reduce_map[operation.lower()]))
    else:
        # explicit root rank 0 for the reduce; only the root holds valid data
        result = np.array(wlb.mpi.reduceInt(sequence, self._reduce_map[operation.lower()], 0))
        return result if wlb.mpi.worldRank() == 0 else None
def create_vtk_writer(self, file_name, data_names, ghost_layers=False):
if ghost_layers is False:
......
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment