Commit af40fa99 authored by Martin Bauer's avatar Martin Bauer

Worked on LBM phasefield 3 phase model

parent 10d68d25
......@@ -91,10 +91,24 @@ class DataHandling(ABC):
def fields(self):
"""Dictionary mapping data name to symbolic pystencils field - use this to create pystencils kernels"""
@property
@abstractmethod
def arrayNames(self):
"""Tuple of all array names"""
@property
@abstractmethod
def customDataNames(self):
"""Tuple of all custom data names"""
@abstractmethod
def ghostLayersOfField(self, name):
"""Returns the number of ghost layers for a specific field/array"""
@abstractmethod
def fSize(self, name):
"""Returns fSize of array"""
@abstractmethod
def iterate(self, sliceObj=None, gpu=False, ghostLayers=None, innerGhostLayers=True):
"""
......@@ -191,3 +205,53 @@ class DataHandling(ABC):
def reduceIntSequence(self, sequence, operation, allReduce=False):
"""See function reduceFloatSequence - this is the same for integers"""
def fill(self, arrayName, val, fValue=None, sliceObj=None, ghostLayers=False, innerGhostLayers=False):
if ghostLayers is True:
ghostLayers = self.ghostLayersOfField(arrayName)
if innerGhostLayers is True:
innerGhostLayers = self.ghostLayersOfField(arrayName)
if fValue is not None and self.fSize(arrayName) < 2:
raise ValueError("fValue parameter only valid for fields with fSize > 1")
for b in self.iterate(sliceObj, ghostLayers=ghostLayers, innerGhostLayers=innerGhostLayers):
if fValue is not None:
b[arrayName][..., fValue].fill(val)
else:
b[arrayName].fill(val)
def min(self, arrayName, sliceObj=None, ghostLayers=False, innerGhostLayers=False, reduce=True):
result = None
if ghostLayers is True:
ghostLayers = self.ghostLayersOfField(arrayName)
if innerGhostLayers is True:
innerGhostLayers = self.ghostLayersOfField(arrayName)
for b in self.iterate(sliceObj, ghostLayers=ghostLayers, innerGhostLayers=innerGhostLayers):
m = np.min(b[arrayName])
result = m if result is None else min(result, m)
return self.reduceFloatSequence([result], 'min')[0] if reduce else result
def max(self, arrayName, sliceObj=None, ghostLayers=False, innerGhostLayers=False, reduce=True):
result = None
if ghostLayers is True:
ghostLayers = self.ghostLayersOfField(arrayName)
if innerGhostLayers is True:
innerGhostLayers = self.ghostLayersOfField(arrayName)
for b in self.iterate(sliceObj, ghostLayers=ghostLayers, innerGhostLayers=innerGhostLayers):
m = np.max(b[arrayName])
result = m if result is None else max(result, m)
return self.reduceFloatSequence([result], 'max')[0] if reduce else result
def __str__(self):
result = ""
rowFormat = "{:>35}|{:>21}|{:>21}\n"
separatorLine = "" * (25 + 21 + 21 + 2) + "\n"
result += rowFormat.format("Name", "Inner (min/max)", "WithGl (min/max)")
result += separatorLine
for arrName in sorted(self.arrayNames):
innerMinMax = (self.min(arrName, ghostLayers=False), self.max(arrName, ghostLayers=False))
withGlMinMax = (self.min(arrName, ghostLayers=True), self.max(arrName, ghostLayers=True))
innerMinMax = "({0[0]:3.3g},{0[1]:3.3g})".format(innerMinMax)
withGlMinMax = "({0[0]:3.3g},{0[1]:3.3g})".format(withGlMinMax)
result += rowFormat.format(arrName, innerMinMax, withGlMinMax)
return result
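The new fill/min/max helpers work block-wise via iterate() and then reduce across all blocks/processes. A minimal usage sketch, assuming a SerialDataHandling and a scalar field named 'phi' (constructor arguments and the field name are illustrative, not part of this commit):
# illustrative only: domainSize and the field name 'phi' are assumptions
dh = SerialDataHandling(domainSize=(30, 30))
dh.addArray('phi', fSize=1, ghostLayers=1)
dh.fill('phi', 0.0)                          # fill interior cells only
dh.fill('phi', 1.0, ghostLayers=True)        # include the field's ghost layers
print(dh.min('phi'), dh.max('phi'))          # reduced over all blocks
print(dh)                                    # per-array min/max overview table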
......@@ -33,7 +33,7 @@ class ParallelDataHandling(DataHandling):
self._fieldInformation = {}
self._cpuGpuPairs = []
self._customDataTransferFunctions = {}
self._customDataNames = []
self._reduceMap = {
'sum': wlb.mpi.SUM,
'min': wlb.mpi.MIN,
......@@ -62,6 +62,9 @@ class ParallelDataHandling(DataHandling):
def ghostLayersOfField(self, name):
return self._fieldInformation[name]['ghostLayers']
def fSize(self, name):
return self._fieldInformation[name]['fSize']
def addCustomData(self, name, cpuCreationFunction,
gpuCreationFunction=None, cpuToGpuTransferFunc=None, gpuToCpuTransferFunc=None):
if cpuCreationFunction and gpuCreationFunction:
......@@ -73,6 +76,7 @@ class ParallelDataHandling(DataHandling):
self.blocks.addBlockData(name, cpuCreationFunction)
if gpuCreationFunction:
self.blocks.addBlockData(self.GPU_DATA_PREFIX + name, gpuCreationFunction)
self._customDataNames.append(name)
def addArray(self, name, fSize=1, dtype=np.float64, latexName=None, ghostLayers=None,
layout=None, cpu=True, gpu=False):
......@@ -126,6 +130,14 @@ class ParallelDataHandling(DataHandling):
def hasData(self, name):
return name in self._fields
@property
def arrayNames(self):
return tuple(self.fields.keys())
@property
def customDataNames(self):
return tuple(self._customDataNames)
def addArrayLike(self, name, nameOfTemplateField, latexName=None, cpu=True, gpu=False):
self.addArray(name, latexName=latexName, cpu=cpu, gpu=gpu, **self._fieldInformation[nameOfTemplateField])
......@@ -141,10 +153,15 @@ class ParallelDataHandling(DataHandling):
ghostLayers = self.defaultGhostLayers
elif ghostLayers is False:
ghostLayers = 0
elif isinstance(ghostLayers, str):
ghostLayers = self.ghostLayersOfField(ghostLayers)
if innerGhostLayers is True:
innerGhostLayers = self.defaultGhostLayers
elif innerGhostLayers is False:
innerGhostLayers = 0
elif isinstance(innerGhostLayers, str):
innerGhostLayers = self.ghostLayersOfField(innerGhostLayers)
prefix = self.GPU_DATA_PREFIX if gpu else ""
if sliceObj is not None:
......
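iterate() now also accepts a field name for the ghostLayers (and innerGhostLayers) argument, in which case that field's own ghost layer count is used. A short sketch, assuming a data handling dh with a field named 'phi':
# illustrative only: 'phi' is an assumed field name
for block in dh.iterate(ghostLayers='phi'):  # use phi's ghost layer count
    block['phi'].fill(0.0)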
......@@ -63,6 +63,9 @@ class SerialDataHandling(DataHandling):
def ghostLayersOfField(self, name):
return self._fieldInformation[name]['ghostLayers']
def fSize(self, name):
return self._fieldInformation[name]['fSize']
def addArray(self, name, fSize=1, dtype=np.float64, latexName=None, ghostLayers=None, layout=None,
cpu=True, gpu=False):
if ghostLayers is None:
......@@ -136,6 +139,8 @@ class SerialDataHandling(DataHandling):
ghostLayers = self.defaultGhostLayers
elif ghostLayers is False:
ghostLayers = 0
elif isinstance(ghostLayers, str):
ghostLayers = self.ghostLayersOfField(ghostLayers)
if sliceObj is None:
sliceObj = (slice(None, None, None),) * self.dim
......@@ -270,6 +275,14 @@ class SerialDataHandling(DataHandling):
return resultFunctor
@property
def arrayNames(self):
return tuple(self.fields.keys())
@property
def customDataNames(self):
return tuple(self.customDataCpu.keys())
@staticmethod
def reduceFloatSequence(sequence, operation, allReduce=False):
return np.array(sequence)
......
......@@ -136,9 +136,7 @@ def __upDownOffsets(d, dim):
# --------------------------------------- Advection Diffusion ----------------------------------------------------------
class Advection(sp.Function):
"""Advection term, create with advection(scalarField, vectorField)"""
@property
def scalar(self):
......@@ -170,11 +168,27 @@ class Advection(sp.Function):
for i in range(self.dim)]
return " + ".join(args)
# --- Interface for discretization strategy
def advection(scalar, vector, idx=None):
assert isinstance(scalar, Field), "Advected scalar has to be a pystencils.Field"
def velocityFieldAtOffset(self, offsetDim, offsetValue, index):
v = self.vector
if isinstance(v, Field):
assert v.indexDimensions == 1
return v.neighbor(offsetDim, offsetValue)(index)
else:
return v[index]
def advectedScalarAtOffset(self, offsetDim, offsetValue):
idx = 0 if self.scalarIndex is None else int(self.scalarIndex)
return self.scalar.neighbor(offsetDim, offsetValue)(idx)
def advection(advectedScalar, velocityField, idx=None):
"""Advection term: divergence( velocityField * advectedScalar )"""
assert isinstance(advectedScalar, Field), "Advected scalar has to be a pystencils.Field"
args = [scalar.center, vector if not isinstance(vector, Field) else vector.center]
args = [advectedScalar.center, velocityField if not isinstance(velocityField, Field) else velocityField.center]
if idx is not None:
args.append(idx)
return Advection(*args)
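A hedged sketch of building an advection term from pystencils fields; the way the fields are constructed here (Field.createGeneric and its parameters) is an assumption for illustration, only the advection() call itself is part of this commit:
# illustrative only: field construction and names are assumptions
from pystencils import Field

c = Field.createGeneric('c', spatialDimensions=2)                     # advected scalar
u = Field.createGeneric('u', spatialDimensions=2, indexDimensions=1)  # velocity field
adv = advection(c, u)   # symbolic term: div( u * c )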
......@@ -207,6 +221,19 @@ class Diffusion(sp.Function):
return r"div(%s \nabla %s)" % (printer.doprint(diffCoeff),
printer.doprint(sp.Symbol(self.scalar.name+nameSuffix)))
# --- Interface for discretization strategy
def diffusionScalarAtOffset(self, offsetDim, offsetValue):
idx = 0 if self.scalarIndex is None else self.scalarIndex
return self.scalar.neighbor(offsetDim, offsetValue)(idx)
def diffusionCoefficientAtOffset(self, offsetDim, offsetValue):
d = self.diffusionCoeff
if isinstance(d, Field):
return d.neighbor(offsetDim, offsetValue)
else:
return d
def diffusion(scalar, diffusionCoeff, idx=None):
assert isinstance(scalar, Field), "Diffused scalar has to be a pystencils.Field"
......@@ -219,7 +246,10 @@ def diffusion(scalar, diffusionCoeff, idx=None):
class Transient(sp.Function):
@property
def scalar(self):
return self.args[0].field
if self.scalarIndex is None:
return self.args[0].field
else:
return self.args[0].field(self.scalarIndex)
@property
def scalarIndex(self):
......@@ -244,34 +274,20 @@ class Discretization2ndOrder:
def _discretize_diffusion(self, expr):
result = 0
scalar = expr.scalar
for c in range(expr.dim):
if isinstance(expr.diffusionCoeff, Field):
firstDiffs = [offset *
(scalar.neighbor(c, offset) * expr.diffusionCoeff.neighbor(c, offset) -
scalar.center * expr.diffusionCoeff.center())
for offset in [-1, 1]]
else:
firstDiffs = [offset *
(scalar.neighbor(c, offset) * expr.diffusionCoeff -
scalar.center * expr.diffusionCoeff)
for offset in [-1, 1]]
firstDiffs = [offset *
(expr.diffusionScalarAtOffset(c, offset) * expr.diffusionCoefficientAtOffset(c, offset) -
expr.diffusionScalarAtOffset(0, 0) * expr.diffusionCoefficientAtOffset(0, 0))
for offset in [-1, 1]]
result += firstDiffs[1] - firstDiffs[0]
return result / (self.dx**2)
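Written out, the loop above assembles per spatial dimension the stencil
( phi[+1]*D[+1] - 2*phi[0]*D[0] + phi[-1]*D[-1] ) / dx**2,
where phi is the diffused scalar and D the diffusion coefficient, each evaluated at the neighbouring cells.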
def _discretize_advection(self, expr):
idx = 0 if expr.scalarIndex is None else int(expr.scalarIndex)
result = 0
for c in range(expr.dim):
if isinstance(expr.vector, Field):
assert expr.vector.indexDimensions == 1
interpolated = [(expr.scalar.neighbor(c, offset)(idx) * expr.vector.neighbor(c, offset)(c) +
expr.scalar.neighbor(c, 0)(idx) * expr.vector.neighbor(c, 0)(c)) / 2
for offset in [-1, 1]]
else:
interpolated = [(expr.scalar.neighbor(c, offset)(idx) * expr.vector(c) -
expr.scalar.neighbor(c, 0)(idx) * expr.vector(c)) / 2
for offset in [-1, 1]]
interpolated = [(expr.advectedScalarAtOffset(c, offset) * expr.velocityFieldAtOffset(c, offset, c) +
expr.advectedScalarAtOffset(c, 0) * expr.velocityFieldAtOffset(c, 0, c)) / 2
for offset in [-1, 1]]
result += interpolated[1] - interpolated[0]
return result / self.dx
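In the advection case the two interpolated face fluxes simplify: the phi[0]*u[0] contributions cancel, leaving per spatial dimension the standard central difference
( phi[+1]*u[+1] - phi[-1]*u[-1] ) / (2*dx).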
......@@ -300,4 +316,3 @@ class Discretization2ndOrder:
else:
raise NotImplementedError("Cannot discretize expression with more than one transient term")
class KernelStep:
def __init__(self, dataHandling, updateRule):
# fields to sync before
# GPU sync fields
pass
import time
class TimeLoop:
def __init__(self):
self._preRunFunctions = []
self._postRunFunctions = []
self._timeStepFunctions = []
self.timeStepsRun = 0
def addStep(self, stepObj):
if hasattr(stepObj, 'preRun'):
self.addPreRunFunction(stepObj.preRun)
if hasattr(stepObj, 'postRun'):
self.addPostRunFunction(stepObj.postRun)
self.add(stepObj.timeStep)
def add(self, timeStepFunction):
self._timeStepFunctions.append(timeStepFunction)
def addKernel(self, dataHandling, kernelFunc):
self._timeStepFunctions.append(lambda: dataHandling.runKernel(kernelFunc))
def addPreRunFunction(self, f):
self._preRunFunctions.append(f)
def addPostRunFunction(self, f):
self._postRunFunctions.append(f)
def run(self, timeSteps=0):
self.preRun()
try:
for i in range(timeSteps):
self.timeStep()
except KeyboardInterrupt:
pass
self.postRun()
def benchmarkRun(self, timeSteps=0, initTimeSteps=0):
self.preRun()
for i in range(initTimeSteps):
self.timeStep()
start = time.perf_counter()
for i in range(timeSteps):
self.timeStep()
end = time.perf_counter()
self.postRun()
timeForOneIteration = (end - start) / timeSteps
return timeForOneIteration
def benchmark(self, timeForBenchmark=5, initTimeSteps=10, numberOfTimeStepsForEstimation=20):
"""
Returns the time in seconds for one time step
:param timeForBenchmark: number of seconds the benchmark should take
:param initTimeSteps: number of time steps run initially for warm-up, to get arrays into cache etc.
:param numberOfTimeStepsForEstimation: time steps run before the real benchmark, to determine the number of
time steps that take approximately 'timeForBenchmark'
"""
# Run a few time steps to get a first estimate
durationOfTimeStep = self.benchmarkRun(numberOfTimeStepsForEstimation, initTimeSteps)
# Run for approximately 'timeForBenchmark' seconds
timeSteps = int(timeForBenchmark / durationOfTimeStep)
timeSteps = max(timeSteps, 20)
return self.benchmarkRun(timeSteps, initTimeSteps)
def preRun(self):
for f in self._preRunFunctions:
f()
def postRun(self):
for f in self._postRunFunctions:
f()
def timeStep(self):
for f in self._timeStepFunctions:
f()
self.timeStepsRun += 1
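A hedged usage sketch of the new TimeLoop; the step function below is a stand-in for a real kernel call and is not part of this commit:
# illustrative only: 'myTimeStep' stands in for a real compiled kernel call
def myTimeStep():
    pass  # e.g. dataHandling.runKernel(kernel)

loop = TimeLoop()
loop.add(myTimeStep)
loop.run(timeSteps=100)                            # run a fixed number of steps
timePerStep = loop.benchmark(timeForBenchmark=2)   # seconds per time step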