Commit 19f9750d authored by Martin Bauer

Parallel data handling now also supports checkpointing

- update the waLBerla package in order to make use of this feature!
parent 21fcd7fc
@@ -300,6 +300,17 @@ class DataHandling(ABC):
         return self.reduce_float_sequence([result], 'max', all_reduce=True)[0] if reduce else result
 
+    def save_all(self, file):
+        """Saves all field data to disk into a file"""
+
+    def load_all(self, file):
+        """Loads all field data from a file on disk.
+
+        Works only if save_all was called with exactly the same field sizes, layouts etc.
+        When run in parallel, save and load have to be called with the same number of processes.
+        Use for checkpointing only - to store results use the VTK output.
+        """
+
     def __str__(self):
         result = ""
...
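
As a usage sketch of the new interface (not part of this commit): create_data_handling and add_array are existing pystencils functions, while the field name 'phi', the domain size, and the checkpoint file name below are assumptions for illustration.

    import numpy as np
    from pystencils.datahandling import create_data_handling

    # Minimal serial round trip through save_all/load_all; the .npz file name
    # assumes the serial implementation stores a numpy archive
    dh = create_data_handling(domain_size=(16, 16))   # serial by default
    dh.add_array('phi')
    for block in dh.iterate():
        block['phi'].fill(42.0)

    dh.save_all('checkpoint.npz')

    # A restarted run must register fields with identical sizes and layouts
    dh2 = create_data_handling(domain_size=(16, 16))
    dh2.add_array('phi')
    dh2.load_all('checkpoint.npz')
    for block in dh2.iterate():
        assert np.all(block['phi'] == 42.0)

The same two calls work unchanged with the parallel handling, except that a directory is passed instead of a file, as the next hunk shows.
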
+import os
 import numpy as np
 import warnings
 from pystencils import Field
...
@@ -369,3 +370,17 @@ class ParallelDataHandling(DataHandling):
     @property
     def world_rank(self):
         return wlb.mpi.worldRank()
+
+    def save_all(self, directory):
+        # Checkpoints are stored as a directory with one file per field;
+        # refuse to write over an existing regular file of that name
+        if os.path.isfile(directory):
+            raise RuntimeError("Trying to save to {}, but file exists already".format(directory))
+        if not os.path.exists(directory):
+            os.mkdir(directory)
+        for field_name, data_name in self._field_name_to_cpu_data_name.items():
+            self.blocks.writeBlockData(data_name, os.path.join(directory, field_name + ".dat"))
+
+    def load_all(self, directory):
+        for field_name, data_name in self._field_name_to_cpu_data_name.items():
+            self.blocks.readBlockData(data_name, os.path.join(directory, field_name + ".dat"))
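
The parallel variant checkpoints into a directory, writing one waLBerla block-data file per registered field. A hedged restart sketch (requires a waLBerla-enabled build; the field name 'vel', the sizes, and the directory name are assumptions):

    from pystencils.datahandling import create_data_handling

    # Hypothetical parallel checkpoint/restart; 'vel' is illustrative only
    dh = create_data_handling(domain_size=(64, 64), parallel=True)
    dh.add_array('vel', values_per_cell=2)

    dh.save_all('checkpoint')   # creates checkpoint/vel.dat via writeBlockData

    # Later, in a run launched with the same 'mpirun -np N' and an
    # identical field setup:
    dh.load_all('checkpoint')   # reads checkpoint/vel.dat via readBlockData

The stored .dat files are tied to waLBerla's block decomposition, hence the interface docstring's requirement that save and load run with the same number of processes.
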
@@ -392,7 +392,7 @@ class SerialDataHandling(DataHandling):
                 continue
             if file_contents[arr_name].shape != arr_contents.shape:
                 print("Skipping read data {} because shapes don't match. "
-                      "Read array shape {}, exising array shape {}".format(arr_name, file_contents[arr_name].shape,
+                      "Read array shape {}, existing array shape {}".format(arr_name, file_contents[arr_name].shape,
                                                                            arr_contents.shape))
                 continue
             np.copyto(arr_contents, file_contents[arr_name])
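
The shape check in the serial load_all above can be reproduced standalone; the sketch below assumes the fields live in a numpy .npz archive (an assumption about the serial storage format, consistent with the indexing above):

    import numpy as np

    existing = {'vel': np.zeros((10, 10, 2))}
    np.savez('ckpt.npz', vel=np.ones((8, 8, 2)))   # deliberately mismatched shape

    file_contents = np.load('ckpt.npz')
    for arr_name, arr_contents in existing.items():
        if arr_name not in file_contents:
            continue
        if file_contents[arr_name].shape != arr_contents.shape:
            print("Skipping read data {} because shapes don't match. "
                  "Read array shape {}, existing array shape {}".format(
                      arr_name, file_contents[arr_name].shape, arr_contents.shape))
            continue
        np.copyto(arr_contents, file_contents[arr_name])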