
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (263)
Showing 1763 additions and 1722 deletions
[flake8]
max-line-length=120
exclude=pystencils/jupyter.py,
pystencils/plot.py
pystencils/session.py
exclude=src/pystencils/jupyter.py,
src/pystencils/plot.py
src/pystencils/session.py
ignore = W293 W503 W291 C901 E741
pystencils/_version.py export-subst
src/pystencils/_version.py export-subst
@@ -5,7 +5,7 @@ __pycache__
*.vti
/build
/dist
/*.egg-info
*.egg-info
.cache
_build
/html_doc
@@ -15,15 +15,16 @@ _build
_local_tmp
RELEASE-VERSION
test-report
pystencils/boundaries/createindexlistcython.c
pystencils/boundaries/createindexlistcython.*.so
pystencils_tests/tmp
pystencils_tests/var
pystencils_tests/kerncraft_inputs/.2d-5pt.c_kerncraft/
pystencils_tests/kerncraft_inputs/.3d-7pt.c_kerncraft/
src/pystencils/boundaries/createindexlistcython.c
src/pystencils/boundaries/createindexlistcython.*.so
tests/tmp
tests/var
tests/kerncraft_inputs/.2d-5pt.c_kerncraft/
tests/kerncraft_inputs/.3d-7pt.c_kerncraft/
report.xml
coverage_report/
# macOS
**/.DS_Store
*.uuid
stages:
- pretest
- test
- nightly
- docs
- deploy
# -------------------------- Templates ------------------------------------------------------------------------------------
# Base configuration for jobs meant to run at every commit
.every-commit:
rules:
- if: $CI_PIPELINE_SOURCE != "schedule"
# Configuration for jobs meant to run on each commit to pycodegen/pystencils/master
.every-commit-master:
rules:
- if: '$CI_PIPELINE_SOURCE != "schedule" && $CI_PROJECT_PATH == "pycodegen/pystencils" && $CI_COMMIT_BRANCH == "master"'
# Base configuration for jobs meant to run on a schedule
.scheduled:
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
# -------------------------- Tests ------------------------------------------------------------------------------------
# Normal test - runs all but "long run" tests on every commit
tests-and-coverage:
stage: pretest
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full:cupy12.3
before_script:
- pip install -e .
script:
- pip install sympy --upgrade
- env
- pip list
- export NUM_CORES=$(nproc --all)
- mkdir -p ~/.config/matplotlib
- echo "backend:template" > ~/.config/matplotlib/matplotlibrc
- mkdir public
- py.test -v -n $NUM_CORES --cov-report html --cov-report term --cov=. -m "not longrun" --html test-report/index.html --junitxml=report.xml
- python3 -m coverage xml
- pytest -v -n $NUM_CORES --cov-report html --cov-report xml --cov-report term --cov=. -m "not longrun" --html test-report/index.html --junitxml=report.xml
- python -m coverage xml
tags:
- docker
- cuda11
- AVX
coverage: /Total coverage:\s\d+.\d+\%/
artifacts:
when: always
paths:
- coverage_report
- test-report
reports:
cobertura: coverage.xml
coverage_report:
coverage_format: cobertura
path: coverage.xml
junit: report.xml
# pipeline with latest python version
latest-python:
# Normal test with longruns
tests-and-coverage-with-longrun:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
when: manual
allow_failure: true
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full:cupy12.3
before_script:
- pip install sympy --upgrade
- pip install -e .
script:
- env
- pip list
@@ -50,74 +73,66 @@ latest-python:
- mkdir -p ~/.config/matplotlib
- echo "backend:template" > ~/.config/matplotlib/matplotlibrc
- mkdir public
- py.test -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
- py.test -v -n $NUM_CORES
tags:
- docker
- cuda11
- AVX
artifacts:
when: always
reports:
junit: report.xml
# Nightly test - runs "long run" jobs only
test-longrun:
# pipeline with latest python version
latest-python:
stage: test
only:
variables:
- $ENABLE_NIGHTLY_BUILDS
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
before_script:
- pip install -e .
script:
- env
- pip list
- pip install -e .
- export NUM_CORES=$(nproc --all)
- mkdir -p ~/.config/matplotlib
- echo "backend:template" > ~/.config/matplotlib/matplotlibrc
- py.test -v -n $NUM_CORES --cov-report html --cov-report term --cov=. --html test-report/index.html --junitxml=report.xml
- mkdir public
- py.test -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
tags:
- docker
- cuda11
- AVX
artifacts:
when: always
paths:
- coverage_report
- test-report
reports:
junit: report.xml
# Minimal tests in windows environment
minimal-windows:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
tags:
- win
script:
- export NUM_CORES=$(nproc --all)
- source /cygdrive/c/Users/build/Miniconda3/Scripts/activate
- source activate pystencils
- pip install joblib
- pip list
- python -c "import numpy"
- py.test -v -m "not (notebook or longrun)"
#minimal-windows:
# stage: test
# tags:
# - win
# script:
# - export NUM_CORES=$(nproc --all)
# - source /cygdrive/c/Users/build/Miniconda3/Scripts/activate
# - source activate pystencils
# - pip install joblib
# - pip list
# - python -c "import numpy"
# - py.test -v -m "not (notebook or longrun)"
ubuntu:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/ubuntu
before_script:
# - apt-get -y remove python3-sympy
- ln -s /usr/include/locale.h /usr/include/xlocale.h
# - pip3 install `grep -Eo 'sympy[>=]+[0-9\.]+' setup.py | sed 's/>/=/g'`
- pip3 install -e .
script:
- export NUM_CORES=$(nproc --all)
- mkdir -p ~/.config/matplotlib
- echo "backend:template" > ~/.config/matplotlib/matplotlibrc
- sed -i 's/--doctest-modules //g' pytest.ini
- env
- pip3 list
- pytest-3 -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
- pip list
- pytest -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
tags:
- docker
- cuda11
@@ -129,10 +144,11 @@ ubuntu:
.multiarch_template:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
allow_failure: true
before_script: &multiarch_before_script
# - pip3 install -v .
- export PYTHONPATH=src
- python3 -c "import pystencils as ps; ps.cpu.cpujit.read_config()"
- sed -i '/^fail_under.*/d' pytest.ini
script:
@@ -142,20 +158,26 @@ ubuntu:
- sed -i 's/--doctest-modules //g' pytest.ini
- env
- pip3 list
- pytest-3 -v -n $NUM_CORES --junitxml=report.xml pystencils_tests/test_*vec*.py pystencils_tests/test_random.py
- python3 -m pytest -v -n $NUM_CORES --cov-report html --cov-report xml --cov=. --junitxml=report.xml tests/test_*vec*.py tests/test_random.py tests/test_half_precision.py
- python3 -m coverage xml
tags:
- docker
- AVX
artifacts:
when: always
paths:
- coverage_report
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
junit: report.xml
arm64v8:
extends: .multiarch_template
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/arm64
variables:
PYSTENCILS_SIMD: "neon"
QEMU_CPU: "cortex-a76"
before_script:
- *multiarch_before_script
- sed -i s/march=native/march=armv8-a/g ~/.config/pystencils/config.json
@@ -163,51 +185,41 @@ arm64v8:
ppc64le:
extends: .multiarch_template
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/ppc64le
variables:
PYSTENCILS_SIMD: "vsx"
before_script:
- *multiarch_before_script
- sed -i s/mcpu=native/mcpu=power8/g ~/.config/pystencils/config.json
arm64v9:
# Compiler support for SVE is still pretty rough: GCC 10+11 produce incorrect code for fixed-width vectors,
# while Clang 12 produces memory-corrupting heisenbugs unless we enable the address sanitizer.
# In the RNG tests, GCC 10+11 produce an internal compiler error.
# The memory corruption seems to only happen with qemu-user, not with qemu-system.
# Once the compilers and QEMU have improved, this job should be cleaned up to match the others.
# SVE support is still unreliable in GCC 11 (incorrect code for fixed-width vectors, internal compiler errors).
# For half precision Clang is necessary
extends: .multiarch_template
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/arm64
variables:
PYSTENCILS_SIMD: "sve256,sve512,sve"
ASAN_OPTIONS: detect_leaks=0
LD_PRELOAD: /usr/lib/aarch64-linux-gnu/libasan.so.6
before_script:
- *multiarch_before_script
- sed -i s/march=native/march=armv8-a+sve/g ~/.config/pystencils/config.json
- sed -i s/march=native/march=armv9-a+sve2+sme/g ~/.config/pystencils/config.json
- sed -i s/g\+\+/clang++/g ~/.config/pystencils/config.json
riscv64:
# The RISC-V vector extension is still experimental and needs special compiler flags.
# Once they are officially released, this job should be cleaned up to match the others.
# The RISC-V vector extension is currently not supported by GCC.
extends: .multiarch_template
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/riscv64
variables:
# explicitly set SIMD as detection requires QEMU >= 8.1
PYSTENCILS_SIMD: "rvv"
QEMU_CPU: "rv64,v=true"
QEMU_CPU: "rv64,v=true,zicboz=true"
before_script:
- *multiarch_before_script
- sed -i 's/march=native/march=rv64imfdv0p10 -menable-experimental-extensions/g' ~/.config/pystencils/config.json
- sed -i s/g\+\+/clang++/g ~/.config/pystencils/config.json
- sed -i 's/fopenmp/fopenmp=libgomp -I\/usr\/include\/riscv64-linux-gnu/g' ~/.config/pystencils/config.json
- sed -i 's/march=native/march=rv64imfdvzicboz/g' ~/.config/pystencils/config.json
- sed -i s/g\+\+/clang++-15/g ~/.config/pystencils/config.json
minimal-conda:
stage: pretest
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda
before_script:
- pip install -e .
script:
- python setup.py quicktest
- python quicktest.py
tags:
- docker
- cuda
@@ -215,13 +227,13 @@ minimal-conda:
minimal-sympy-master:
stage: test
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/minimal_conda
before_script:
- pip install -e .
script:
- python -m pip install --upgrade git+https://github.com/sympy/sympy.git
- python setup.py quicktest
- python quicktest.py
allow_failure: true
tags:
- docker
@@ -247,7 +259,6 @@ pycodegen-integration:
- cd ..
- pip install -e pystencils/
- pip install -e lbmpy/
- pip install -e pygrandchem/
- cmake --version
- ./install_walberla.sh
- export NUM_CORES=$(nproc --all)
@@ -257,14 +268,12 @@ pycodegen-integration:
- py.test -v -n $NUM_CORES --junitxml=report.xml .
- cd ../lbmpy
- py.test -v -n $NUM_CORES --junitxml=report.xml .
- cd ../pygrandchem
- py.test -v -n $NUM_CORES --junitxml=report.xml .
- cd ../walberla/build/
- make -j $NUM_CORES CodegenJacobiCPU CodegenJacobiGPU CodegenPoissonCPU CodegenPoissonGPU MicroBenchmarkGpuLbm LbCodeGenerationExample
- make -j $NUM_CORES multiphaseCPU multiphaseGPU FluctuatingMRT FlowAroundSphereCodeGen
- make -j $NUM_CORES multiphaseCPU multiphaseGPU FluctuatingMRT FlowAroundSphereCodeGen FieldLayoutAndVectorizationTest GeneratedOutflowBC
- cd apps/benchmarks/UniformGridGPU
- make -j $NUM_CORES
- cd ../UniformGridGenerated
- cd ../UniformGridCPU
- make -j $NUM_CORES
tags:
- docker
@@ -275,26 +284,57 @@ pycodegen-integration:
reports:
junit: pycodegen/*/report.xml
# -------------------- Scheduled Tasks --------------------------------------------------------------------------
# Nightly test against the latest (pre-release) version of SymPy published on PyPI
nightly-sympy:
stage: nightly
needs: []
extends: .scheduled
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/latest_python
before_script:
- pip install -e .
- pip install --upgrade --pre sympy
script:
- env
- pip list
- export NUM_CORES=$(nproc --all)
- mkdir -p ~/.config/matplotlib
- echo "backend:template" > ~/.config/matplotlib/matplotlibrc
- mkdir public
- pytest -v -n $NUM_CORES -m "not longrun" --junitxml=report.xml
tags:
- docker
- AVX
- cuda
artifacts:
when: always
reports:
junit: report.xml
# -------------------- Linter & Documentation --------------------------------------------------------------------------
flake8-lint:
stage: pretest
except:
variables:
- $ENABLE_NIGHTLY_BUILDS
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
script:
- flake8 pystencils
- flake8 src/pystencils
tags:
- docker
build-documentation:
stage: test
stage: docs
extends: .every-commit
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/documentation
needs: []
before_script:
- pip install -e .
script:
- export PYTHONPATH=`pwd`
- mkdir html_doc
- sphinx-build -b html doc html_doc
- sphinx-build -W -b html doc html_doc
@@ -307,7 +347,9 @@ build-documentation:
pages:
image: i10git.cs.fau.de:5005/pycodegen/pycodegen/full
extends: .every-commit-master
stage: deploy
needs: ["tests-and-coverage", "build-documentation"]
script:
- ls -l
- mv coverage_report html_doc
@@ -317,5 +359,3 @@ pages:
- public
tags:
- docker
only:
- master@pycodegen/pystencils
include README.md
include COPYING.txt
include AUTHORS.txt
include CONTRIBUTING.md
CHANGELOG.md
global-include *.pyx
include versioneer.py
include pystencils/_version.py
include CHANGELOG.md
@@ -52,7 +52,7 @@ pip install pystencils[interactive]
Without `[interactive]` you get a minimal version with very few dependencies.
All options:
- `gpu`: use this if an NVIDIA GPU is available and CUDA is installed
- `gpu`: use this if an NVIDIA or AMD GPU is available and CUDA or ROCm is installed
- `alltrafos`: pulls in additional dependencies for loop simplification e.g. libisl
- `bench_db`: functionality to store benchmark results in object databases
- `interactive`: installs dependencies to work in Jupyter including image I/O, plotting etc.
@@ -63,7 +63,7 @@ Options can be combined e.g.
pip install pystencils[interactive, gpu, doc]
```
pystencils is also fully compatible with Windows machines. If working with Visual Studio and pycuda, make sure to run the example files first to ensure that pycuda can find the compiler's executable.
pystencils is also fully compatible with Windows machines. If working with Visual Studio and cupy, make sure to run the example files first to ensure that cupy can find the compiler's executable.
Documentation
-------------
@@ -81,4 +81,7 @@ Many thanks go to the [contributors](https://i10git.cs.fau.de/pycodegen/pystenci
If you use pystencils in a publication, please cite the following articles:
Overview:
- M. Bauer et al, Code Generation for Massively Parallel Phase-Field Simulations. Association for Computing Machinery, 2019. https://doi.org/10.1145/3295500.3356186
\ No newline at end of file
- M. Bauer et al, Code Generation for Massively Parallel Phase-Field Simulations. Association for Computing Machinery, 2019. https://doi.org/10.1145/3295500.3356186
Performance Modelling:
- D. Ernst et al, Analytical performance estimation during code generation on modern GPUs. Journal of Parallel and Distributed Computing, 2023. https://doi.org/10.1016/j.jpdc.2022.11.003
@@ -7,7 +7,7 @@
# conda env create -f conda_environment_user.yml
# . activate pystencils
#
# If you have CUDA installed and want to use your GPU, uncomment the last line to install pycuda
# If you have CUDA or ROCm installed and want to use your GPU, uncomment the last line to install cupy
#
# ----------------------------------------------------------------------------------------------------------------------
@@ -32,4 +32,4 @@ dependencies:
- ipy_table # HTML tables for jupyter notebooks
- pyevtk # VTK output for serial simulations
- blitzdb # file-based No-SQL database to store simulation results
#- pycuda # add this if you have CUDA installed
#- cupy # add this if you have CUDA or ROCm installed
@@ -8,7 +8,7 @@ import nbformat
import pytest
from nbconvert import PythonExporter
from pystencils.boundaries.createindexlistcython import * # NOQA
from pystencils.boundaries.createindexlist import * # NOQA
# Trigger config file reading / creation once - to avoid race conditions when multiple instances are creating it
# at the same time
from pystencils.cpu import cpujit
@@ -40,51 +40,48 @@ def add_path_to_ignore(path):
collect_ignore = [os.path.join(SCRIPT_FOLDER, "doc", "conf.py"),
os.path.join(SCRIPT_FOLDER, "pystencils", "opencl", "opencl.autoinit")]
add_path_to_ignore('pystencils_tests/benchmark')
os.path.join(SCRIPT_FOLDER, "src", "pystencils", "opencl", "opencl.autoinit")]
add_path_to_ignore('tests/benchmark')
add_path_to_ignore('_local_tmp')
try:
import pycuda
import cupy
except ImportError:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "pystencils_tests/test_cudagpu.py")]
add_path_to_ignore('pystencils/gpucuda')
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/test_gpu.py")]
add_path_to_ignore('src/pystencils/gpu')
try:
import waLBerla
except ImportError:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "pystencils_tests/test_aligned_array.py"),
os.path.join(SCRIPT_FOLDER, "pystencils_tests/test_datahandling_parallel.py"),
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/test_aligned_array.py"),
os.path.join(SCRIPT_FOLDER, "tests/test_datahandling_parallel.py"),
os.path.join(SCRIPT_FOLDER, "doc/notebooks/03_tutorial_datahandling.ipynb"),
os.path.join(SCRIPT_FOLDER, "pystencils/datahandling/parallel_datahandling.py"),
os.path.join(SCRIPT_FOLDER, "pystencils_tests/test_small_block_benchmark.ipynb")]
os.path.join(SCRIPT_FOLDER, "src/pystencils/datahandling/parallel_datahandling.py"),
os.path.join(SCRIPT_FOLDER, "tests/test_small_block_benchmark.ipynb")]
try:
import blitzdb
except ImportError:
add_path_to_ignore('pystencils/runhelper')
collect_ignore += [os.path.join(SCRIPT_FOLDER, "pystencils_tests/test_parameterstudy.py")]
add_path_to_ignore('src/pystencils/runhelper')
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/test_parameterstudy.py")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, "tests/test_json_serializer.py")]
try:
import islpy
except ImportError:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "pystencils/integer_set_analysis.py")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, "src/pystencils/integer_set_analysis.py")]
try:
import graphviz
except ImportError:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "pystencils/backends/dot.py")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, "src/pystencils/backends/dot.py")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, "doc/notebooks/01_tutorial_getting_started.ipynb")]
try:
import pyevtk
except ImportError:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "pystencils/datahandling/vtk.py")]
# TODO: Remove if Ubuntu 18.04 is no longer supported
if pytest_version < 50403:
collect_ignore += [os.path.join(SCRIPT_FOLDER, "pystencils_tests/test_jupyter_extensions.ipynb")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, "src/pystencils/datahandling/vtk.py")]
collect_ignore += [os.path.join(SCRIPT_FOLDER, 'setup.py')]
@@ -26,14 +26,14 @@ templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
copyright = f'{datetime.datetime.now().year}, Martin Bauer'
author = 'Martin Bauer'
copyright = f'{datetime.datetime.now().year}, Martin Bauer, Markus Holzer, Frederik Hennig'
author = 'Martin Bauer, Markus Holzer, Frederik Hennig'
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', pystencils.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = pystencils.__version__
language = None
language = 'en'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
default_role = 'any'
pygments_style = 'sphinx'
This source diff could not be displayed because it is too large.
%% Cell type:code id: tags:
``` python
from pystencils.session import *
```
%% Cell type:markdown id: tags:
# Tutorial 02: Basic Kernel generation with *pystencils*
Now that you have an [overview of pystencils](01_tutorial_getting_started.ipynb),
this tutorial shows in more detail how to formulate, optimize and run stencil kernels.
## 1) Kernel Definition
### a) Defining kernels with assignment lists and the `kernel` decorator
*pystencils* gets a symbolic formulation of the kernel. This can be either an `Assignment` or a sequence of `Assignment`s that follow a set of restrictions.
Let's first create a kernel that consists of multiple assignments:
%% Cell type:code id: tags:
``` python
src_arr = np.zeros([20, 30])
dst_arr = np.zeros_like(src_arr)
dst, src = ps.fields(dst=dst_arr, src=src_arr)
```
%% Cell type:code id: tags:
``` python
grad_x, grad_y = sp.symbols("grad_x, grad_y")
symbolic_description = [
ps.Assignment(grad_x, (src[1, 0] - src[-1, 0]) / 2),
ps.Assignment(grad_y, (src[0, 1] - src[0, -1]) / 2),
ps.Assignment(dst[0, 0], grad_x + grad_y),
]
kernel = ps.create_kernel(symbolic_description)
symbolic_description
```
%% Output
$\displaystyle \left[ grad_{x} \leftarrow \frac{{src}_{(1,0)}}{2} - \frac{{src}_{(-1,0)}}{2}, \ grad_{y} \leftarrow \frac{{src}_{(0,1)}}{2} - \frac{{src}_{(0,-1)}}{2}, \ {dst}_{(0,0)} \leftarrow grad_{x} + grad_{y}\right]$
$\displaystyle \left[ grad_{x} \leftarrow_{} \frac{{src}_{(1,0)}}{2} - \frac{{src}_{(-1,0)}}{2}, \ grad_{y} \leftarrow_{} \frac{{src}_{(0,1)}}{2} - \frac{{src}_{(0,-1)}}{2}, \ {dst}_{(0,0)} \leftarrow_{} grad_{x} + grad_{y}\right]$
⎡ src_E src_W src_N src_S ⎤
⎢gradₓ := ───── - ─────, grad_y := ───── - ─────, dst_C := gradₓ + grad_y⎥
⎣ 2 2 2 2 ⎦
%% Cell type:markdown id: tags:
We created subexpressions, using standard sympy symbols on the left hand side, to split the kernel into multiple assignments. Defining a kernel using a list of `Assignment`s is quite tedious and hard to read.
To simplify the formulation of a kernel, *pystencils* offers the `kernel` decorator, which transforms a normal Python function with `@=` assignments into an assignment list that can be passed to `create_kernel`.
%% Cell type:code id: tags:
``` python
@ps.kernel
def symbolic_description_using_function():
grad_x @= (src[1, 0] - src[-1, 0]) / 2
grad_y @= (src[0, 1] - src[0, -1]) / 2
dst[0, 0] @= grad_x + grad_y
symbolic_description_using_function
```
%% Output
$\displaystyle \left[ grad_{x} \leftarrow \frac{{src}_{(1,0)}}{2} - \frac{{src}_{(-1,0)}}{2}, \ grad_{y} \leftarrow \frac{{src}_{(0,1)}}{2} - \frac{{src}_{(0,-1)}}{2}, \ {dst}_{(0,0)} \leftarrow grad_{x} + grad_{y}\right]$
$\displaystyle \left[ grad_{x} \leftarrow_{} \frac{{src}_{(1,0)}}{2} - \frac{{src}_{(-1,0)}}{2}, \ grad_{y} \leftarrow_{} \frac{{src}_{(0,1)}}{2} - \frac{{src}_{(0,-1)}}{2}, \ {dst}_{(0,0)} \leftarrow_{} grad_{x} + grad_{y}\right]$
⎡ src_E src_W src_N src_S ⎤
⎢gradₓ := ───── - ─────, grad_y := ───── - ─────, dst_C := gradₓ + grad_y⎥
⎣ 2 2 2 2 ⎦
%% Cell type:markdown id: tags:
The decorated function can contain any Python code; only the `@=` operator and the ternary inline `if-else` operator have a different meaning.
### b) Ternary 'if' with `Piecewise`
The ternary operator maps to `sympy.Piecewise` functions, which can be used to introduce branching into the kernel. Piecewise-defined functions must give a value for every input, i.e. there must be an 'otherwise' clause at the end, indicated by the condition `True`. Piecewise objects are standard sympy terms that can be integrated into bigger expressions:
%% Cell type:code id: tags:
``` python
sp.Piecewise((1.0, src[0,1] > 0), (0.0, True)) + src[1, 0]
```
%% Output
$\displaystyle {src}_{(1,0)} + \begin{cases} 1.0 & \text{for}\: {src}_{(0,1)} > 0 \\0.0 & \text{otherwise} \end{cases}$
⎛⎧1.0 for src_N > 0⎞
src_E + ⎜⎨ ⎟
⎝⎩0.0 otherwise ⎠
%% Cell type:markdown id: tags:
Piecewise objects are created by the `kernel` decorator for ternary if-else statements.
%% Cell type:code id: tags:
``` python
@ps.kernel
def kernel_with_piecewise():
grad_x @= (src[1, 0] - src[-1, 0]) / 2 if src[-1, 0] > 0 else 0.0
kernel_with_piecewise
```
%% Output
$\displaystyle \left[ grad_{x} \leftarrow \begin{cases} \frac{{src}_{(1,0)}}{2} - \frac{{src}_{(-1,0)}}{2} & \text{for}\: {src}_{(-1,0)} > 0 \\0.0 & \text{otherwise} \end{cases}\right]$
$\displaystyle \left[ grad_{x} \leftarrow_{} \begin{cases} \frac{{src}_{(1,0)}}{2} - \frac{{src}_{(-1,0)}}{2} & \text{for}\: {src}_{(-1,0)} > 0 \\0.0 & \text{otherwise} \end{cases}\right]$
⎡ ⎧src_E src_W ⎤
⎢ ⎪───── - ───── for src_W > 0⎥
⎢gradₓ := ⎨ 2 2 ⎥
⎢ ⎪ ⎥
⎣ ⎩ 0.0 otherwise ⎦
%% Cell type:markdown id: tags:
### c) Assignment level optimizations using `AssignmentCollection`
When the kernels get larger and more complex, it is helpful to organize the list of assignments in a more structured way. The `AssignmentCollection` offers optimizing transformations on a list of assignments. It holds two assignment lists, one for subexpressions and one for the main assignments. Main assignments are typically those that write to an array.
%% Cell type:code id: tags:
``` python
@ps.kernel
def somewhat_longer_dummy_kernel(s):
s.a @= src[0, 1] + src[-1, 0]
s.b @= 2 * src[1, 0] + src[0, -1]
s.c @= src[0, 1] + 2 * src[1, 0] + src[-1, 0] + src[0, -1] - src[0,0]
dst[0, 0] @= s.a + s.b + s.c
ac = ps.AssignmentCollection(main_assignments=somewhat_longer_dummy_kernel[-1:],
subexpressions=somewhat_longer_dummy_kernel[:-1])
ac
```
%% Output
AssignmentCollection: dst_C, <- f(src_W, src_S, src_N, src_C, src_E)
AssignmentCollection: dst_C, <- f(src_C, src_W, src_S, src_N, src_E)
%% Cell type:code id: tags:
``` python
ac.operation_count
```
%% Output
{'adds': 8,
'muls': 2,
'divs': 0,
'sqrts': 0,
'fast_sqrts': 0,
'fast_inv_sqrts': 0,
'fast_div': 0}
%% Cell type:markdown id: tags:
The `pystencils.simp` submodule offers several functions to optimize a collection of assignments.
It also offers functionality to group optimization into strategies and evaluate them.
In this example we reduce the number of operations by reusing existing subexpressions to get rid of two unnecessary floating point additions. For more information about assignment collections and simplifications see the [demo notebook](demo_assignment_collection.ipynb).
%% Cell type:code id: tags:
``` python
opt_ac = ps.simp.subexpression_substitution_in_existing_subexpressions(ac)
opt_ac
```
%% Output
AssignmentCollection: dst_C, <- f(src_W, src_S, src_N, src_C, src_E)
AssignmentCollection: dst_C, <- f(src_C, src_W, src_S, src_N, src_E)
%% Cell type:code id: tags:
``` python
opt_ac.operation_count
```
%% Output
{'adds': 6,
'muls': 1,
'divs': 0,
'sqrts': 0,
'fast_sqrts': 0,
'fast_inv_sqrts': 0,
'fast_div': 0}
%% Cell type:markdown id: tags:
### d) Ghost layers and iteration region
When creating a kernel with neighbor accesses, *pystencils* automatically restricts the iteration region, such that all accesses are safe.
%% Cell type:code id: tags:
``` python
kernel = ps.create_kernel(ps.Assignment(dst[0,0], src[2, 0] + src[-1, 0]))
ps.show_code(kernel)
```
%% Output
%% Cell type:markdown id: tags:
When no additional ghost layer information is given, *pystencils* looks at all neighboring field accesses and introduces the required number of ghost layers **for all directions**. In the example above the largest neighbor access was ``src[2, 0]``, so theoretically we would need 2 ghost layers only at the end of the x coordinate.
By default *pystencils* introduces 2 ghost layers at all borders of the domain. The next cell shows how to change this behavior. Be careful with manual ghost layer specification; wrong values may lead to SEGFAULTs.
%% Cell type:code id: tags:
``` python
gl_spec = [(0, 2), # 0 ghost layers at the left, 2 at the right border
(1, 0)] # 1 ghost layer at the lower y, one at the upper y coordinate
kernel = ps.create_kernel(ps.Assignment(dst[0,0], src[2, 0] + src[-1, 0]), ghost_layers=gl_spec)
ps.show_code(kernel)
```
%% Output
%% Cell type:markdown id: tags:
## 2 ) Restrictions
### a) Independence Restriction
*pystencils* only works for kernels where each array element can be updated independently of all other elements. This restriction ensures that the kernels can be easily parallelized and also run on the GPU. Trying to define kernels where the result depends on the iteration order leads to a ValueError.
%% Cell type:code id: tags:
``` python
invalid_description = [
ps.Assignment(dst[1, 0], src[1, 0] + src[-1, 0]),
ps.Assignment(dst[0, 0], src[1, 0] - src[-1, 0]),
]
try:
invalid_kernel = ps.create_kernel(invalid_description)
assert False, "Should never be executed"
except ValueError as e:
print(e)
```
%% Output
Field dst is written at two different locations
%% Cell type:markdown id: tags:
The independence restriction makes sure that the kernel can be safely parallelized by checking the following conditions: If a field is modified inside the kernel, it may only be modified at a single spatial position. In that case the field may also only be read at this position. Fields that are not modified may be read at multiple neighboring positions.
Specifically, this rule allows for in-place updates that don't access neighbors.
%% Cell type:code id: tags:
``` python
valid_kernel = ps.create_kernel(ps.Assignment(src[0,0], 2*src[0,0] + 42))
```
%% Cell type:markdown id: tags:
If a field stores multiple values per cell, as in the next example, this restriction only applies for accesses with the same index.
%% Cell type:code id: tags:
``` python
v = ps.fields("v(2): double[2D]")
valid_kernel = ps.create_kernel([ps.Assignment(v[0,0](1), 2*v[0,0](1) + 42),
ps.Assignment(v[0,1](0), 2*v[1,0](0) + 42)])
ps.Assignment(v[0,1](0), 2*v[0,1](0) + 42)])
```
%% Cell type:markdown id: tags:
### b) Static Single Assignment Form
All assignments that don't write to a field must be in SSA form
1. Each sympy symbol may only occur once as a left-hand-side (fields can be written multiple times)
2. A symbol has to be defined before it is used. If it is never defined it is introduced as a function parameter
The next cell demonstrates the first SSA restriction:
%% Cell type:code id: tags:
``` python
@ps.kernel
def not_allowed():
a, b = sp.symbols("a b")
a @= src[0, 0]
b @= a + 3
a @= src[-1, 0]
dst[0, 0] @= a + b
try:
ps.create_kernel(not_allowed)
assert False
except ValueError as e:
print(e)
```
%% Output
Assignments not in SSA form, multiple assignments to a
%% Cell type:markdown id: tags:
However, for right hand sides that are Field.Accesses this is allowed:
Also, it is not allowed to write to a field twice at the same location:
%% Cell type:code id: tags:
``` python
@ps.kernel
def allowed():
def not_allowed():
dst[0, 0] @= src[0, 1] + src[1, 0]
dst[0, 0] @= 2 * dst[0, 0]
ps.create_kernel(allowed)
try:
ps.create_kernel(not_allowed)
assert False
except ValueError as e:
print(e)
```
%% Output
KernelFunction kernel([_data_dst, _data_src])
Field dst is written twice at the same location
%% Cell type:markdown id: tags:
This situation should be resolved by introducing temporary variables
%% Cell type:code id: tags:
``` python
tmp_var = sp.Symbol("a")
@ps.kernel
def allowed():
tmp_var @= src[0, 1] + src[1, 0]
dst[0, 0] @= 2 * tmp_var
ast = ps.create_kernel(allowed)
ps.show_code(ast)
```
%% Output
%% Cell type:code id: tags:
``` python
import psutil
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot, cm
from pystencils.session import *
from pystencils.boundaries import add_neumann_boundary, Neumann, Dirichlet, BoundaryHandling
from pystencils.slicing import slice_from_direction
import math
import time
%matplotlib inline
```
%% Cell type:markdown id: tags:
Test to see if pycuda is installed which is needed to run calculations on the GPU
Test to see if cupy is installed which is needed to run calculations on the GPU
%% Cell type:code id: tags:
``` python
try:
import pycuda
import cupy
gpu = True
except ImportError:
gpu = False
pycuda = None
print('No pycuda installed')
if pycuda:
import pycuda.gpuarray as gpuarray
cupy = None
print('No cupy installed')
```
%% Output
No pycuda installed
%% Cell type:markdown id: tags:
# Tutorial 03: Datahandling
%% Cell type:markdown id: tags:
This is a tutorial about the `DataHandling` class of pystencils. This class is an abstraction layer to
- link numpy arrays to pystencils fields
- handle CPU-GPU array transfer, such that one can write code that works on CPU and GPU
- make it possible to write MPI parallel simulations that run on distributed-memory clusters using the waLBerla library
We will look at a small and easy example to demonstrate the usage of `DataHandling` objects. We will define an averaging kernel, applied to every cell of an array, that writes the average of the neighboring cell values to the center.
%% Cell type:markdown id: tags:
## 1. Manual
### 1.1. CPU kernels
In this first part, we set up a scenario manually without a `DataHandling`. In the next sections we then repeat the same setup with the help of the data handling.
One concept of *pystencils* that may be confusing at first is the difference between pystencils fields and numpy arrays. Fields are used to describe the computation *symbolically* with sympy, while numpy arrays hold the actual values the computation is executed on.
One option to create and execute a *pystencils* kernel is listed below. For reasons that become clear later we call this the **variable-field-size workflow**:
1. define pystencils fields
2. use sympy and the pystencils fields to define an update rule, that describes what should be done on *every cell*
3. compile the update rule to a real function that can be called from Python. For each field that was referenced in the symbolic description, the function expects a numpy array, passed as a named parameter
4. create some numpy arrays with actual data
5. call the kernel - usually many times
Now, let's see how this actually looks in Python code:
%% Cell type:code id: tags:
``` python
# 1. field definitions
src_field, dst_field = ps.fields("src, dst:[2D]")
# 2. define update rule
update_rule = [ps.Assignment(lhs=dst_field[0, 0],
rhs=(src_field[1, 0] + src_field[-1, 0] +
src_field[0, 1] + src_field[0, -1]) / 4)]
# 3. compile update rule to function
kernel_function = ps.create_kernel(update_rule).compile()
# 4. create numpy arrays and call kernel
src_arr, dst_arr = np.random.rand(30, 30), np.zeros([30, 30])
# 5. call kernel
kernel_function(src=src_arr, dst=dst_arr) # names of arguments have to match names passed to ps.fields()
```
%% Cell type:markdown id: tags:
This workflow separates the symbolic and the numeric stages very cleanly. The separation also makes it possible to stop after step 3, write the C code to a file and call the kernel from a C program. Speaking of the C code - let's have a look at the generated sources:
%% Cell type:code id: tags:
``` python
ps.show_code(kernel_function.ast)
```
%% Output
%% Cell type:markdown id: tags:
Even if it looks very ugly and low-level :) let's look at this code in a bit more detail. The code is generated in a way that it works for different array sizes. The size of the array is passed in the `_size_dst_` variables that specify the shape of the array for each dimension. Also, the memory layout (linearization) of the array can be different. That means the array could be stored in row-major or column-major order - if we pass in the array strides correctly the kernel does the right thing. If you're not familiar with the concept of strides check out [this stackoverflow post](https://stackoverflow.com/questions/53097952/how-to-understand-numpy-strides-for-layman) or search in the numpy documentation for strides - C vs Fortran order.
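If strides are new to you, the following plain-numpy sketch (independent of the generated kernel above) shows what they mean: the stride of a dimension is the number of bytes to step to reach the next element along that dimension.

``` python
import numpy as np

arr_c = np.zeros((30, 30))           # row-major (C) layout, 8-byte doubles
arr_f = np.asfortranarray(arr_c)     # same values, column-major (Fortran) layout

print(arr_c.strides)   # (240, 8): the next row is 30 * 8 bytes away, the next column 8 bytes
print(arr_f.strides)   # (8, 240): the layout, and hence the strides, is transposed
```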
The goal of *pystencils* is to produce the fastest possible code. One technique to do this is to use all available information already at compile time and generate code that is highly adapted to the specific problem. In our case we already know the shape and strides of the arrays we want to apply the kernel to, so we can make use of this information. This idea leads to the **fixed-field-size workflow**. The main difference there is that we define the arrays first and therefore let *pystencils* know about the array shapes and strides, so that it can generate more specific code:
1. create numpy arrays that hold your data
2. define pystencils fields, this time telling pystencils already which arrays they correspond to, so that it knows about the size and strides
in the other steps nothing has changed
3. define the update rule
4. compile update rule to kernel
5. run the kernel
%% Cell type:code id: tags:
``` python
# 1. create arrays first
src_arr, dst_arr = np.random.rand(30, 30), np.zeros([30, 30])
# 2. define symbolic fields - note the additional parameters that link an array to each field
src_field, dst_field = ps.fields("src, dst:[2D]", src=src_arr, dst=dst_arr)
# 3. define update rule
update_rule = [ps.Assignment(lhs=dst_field[0, 0],
rhs=(src_field[1, 0] + src_field[-1, 0] +
src_field[0, 1] + src_field[0, -1]) / 4)]
# 4. compile it
kernel_function = ps.create_kernel(update_rule).compile()
# 5. call kernel
kernel_function(src=src_arr, dst=dst_arr) # names of arguments have to match names passed to ps.fields()
```
%% Cell type:markdown id: tags:
Functionally, both variants are equivalent. We see the difference only when we look at the generated code
%% Cell type:code id: tags:
``` python
ps.show_code(kernel_function.ast)
```
%% Output
%% Cell type:markdown id: tags:
Compare this to the code above! It looks much simpler. The reason is that all index computations are already simplified since the exact field sizes and strides are known. This kernel now only works on arrays of the previously specified size.
Let's see what happens if we use a different array:
%% Cell type:code id: tags:
``` python
src_arr2, dst_arr2 = np.random.rand(40, 40), np.zeros([40, 40])
try:
kernel_function(src=src_arr2, dst=dst_arr2)
except ValueError as e:
print(e)
```
%% Output
Wrong shape of array dst. Expected (30, 30)
%% Cell type:markdown id: tags:
### 1.2. GPU simulations
Let's now jump to a seemingly unrelated topic: running kernels on the GPU.
When creating the kernel, an additional parameter `target=ps.Target.GPU` has to be passed. Also, the compiled kernel cannot be called with numpy arrays directly, but has to be called with `pycuda.gpuarray`s instead. That means, we have to transfer our numpy array to GPU first. From this step we obtain a gpuarray, then we can run the kernel, hopefully multiple times so that the data transfer was worth the time. Finally we transfer the finished result back to CPU:
When creating the kernel, an additional parameter `target=ps.Target.GPU` has to be passed. Also, the compiled kernel cannot be called with numpy arrays directly, but has to be called with `cupy.array`s instead. That means, we have to transfer our numpy array to GPU first. From this step we obtain a gpuarray, then we can run the kernel, hopefully multiple times so that the data transfer was worth the time. Finally we transfer the finished result back to CPU:
%% Cell type:code id: tags:
``` python
if pycuda:
if cupy:
config = ps.CreateKernelConfig(target=ps.Target.GPU)
kernel_function_gpu = ps.create_kernel(update_rule, config=config).compile()
# transfer to GPU
src_arr_gpu = pycuda.gpuarray.to_gpu(src_arr)
dst_arr_gpu = pycuda.gpuarray.to_gpu(dst_arr)
src_arr_gpu = cupy.asarray(src_arr)
dst_arr_gpu = cupy.asarray(dst_arr)
# run kernel on GPU, this is done many times in real setups
kernel_function_gpu(src=src_arr_gpu, dst=dst_arr_gpu)
# transfer result back to CPU
dst_arr_gpu.get(dst_arr)
dst_arr = dst_arr_gpu.get()
```
%% Cell type:markdown id: tags:
### 1.3. Summary: manual way
- Don't confuse *pystencils* fields and *numpy* arrays
- fields are symbolic
- arrays are numeric
- Use the fixed-field-size workflow whenever possible, since code might be faster. Create arrays first, then create fields from arrays
- if we run GPU kernels, arrays have to be transferred to the GPU first
As demonstrated in the examples above we have to define 2 or 3 corresponding objects for each grid:
- symbolic pystencils field
- numpy array on CPU
- for GPU run also a pycuda.gpuarray to mirror the data on the GPU
- for GPU use also cupy to mirror the data on the GPU
Managing these three objects manually is tedious and error-prone. We'll see in the next section how the data handling object takes care of this problem.
%% Cell type:markdown id: tags:
## 2. Introducing the data handling - serial version
### 2.1. Example for CPU simulations
The data handling internally keeps a mapping between symbolic fields and numpy arrays. When we create a field, a corresponding array is automatically allocated as well. Optionally, we can also allocate memory for the array on the GPU. Let's dive right in and see what our example looks like when implemented with a data handling.
%% Cell type:code id: tags:
``` python
dh = ps.create_data_handling(domain_size=(30, 30))
# fields are now created using the data handling
src_field = dh.add_array('src', values_per_cell=1)
dst_field = dh.add_array('dst', values_per_cell=1)
# kernel is created just like before
update_rule = [ps.Assignment(lhs=dst_field[0, 0],
rhs=(src_field[1, 0] + src_field[-1, 0] + src_field[0, 1] + src_field[0, -1]) / 4)]
kernel_function = ps.create_kernel(update_rule).compile()
# have a look at the generated code - it uses
# the fast version where array sizes are compiled-in
# ps.show_code(kernel_function.ast)
```
%% Cell type:markdown id: tags:
The data handling has methods to create fields - but where are the corresponding arrays?
In the serial case you can access them as a member of the data handling, for example to initialize our 'src' array we can write
%% Cell type:code id: tags:
``` python
src_arr = dh.cpu_arrays['src']
src_arr.fill(0.0)
```
%% Cell type:markdown id: tags:
This method is nice and easy, but you should not use it if you want your simulation to run on distributed-memory clusters. We'll see why in the last section. So it is a good habit not to access the arrays directly but to use the data handling instead. We can, for example, initialize the array also with the following code:
%% Cell type:code id: tags:
``` python
dh.fill('src', 0.0)
```
%% Cell type:markdown id: tags:
To run the kernels with the same code as before, we would also need the arrays. We could do that accessing the `cpu_arrays`:
%% Cell type:code id: tags:
``` python
kernel_function(src=dh.cpu_arrays['src'],
dst=dh.cpu_arrays['dst'])
```
%% Cell type:markdown id: tags:
but to be prepared for MPI parallel simulations, a method of the data handling should again be used for this.
Besides, this method is also simpler to use, since it automatically detects which arrays a kernel uses and passes them in.
%% Cell type:code id: tags:
``` python
dh.run_kernel(kernel_function)
```
%% Cell type:markdown id: tags:
To access the data read-only, the gather function should be used instead of `cpu_arrays`.
This function gives you a read-only copy of the domain or part of the domain.
We will discuss this function later in more detail when we look at MPI parallel simulations.
For serial simulations keep in mind that modifying the resulting array does not change your original data!
%% Cell type:code id: tags:
``` python
read_only_copy = dh.gather_array('src', ps.make_slice[:, :], ghost_layers=False)
```
%% Cell type:markdown id: tags:
### 2.2. Example for GPU simulations
In this section we have a look at GPU simulations using the data handling. Only minimal changes are required.
When creating the data handling we can pass a 'default_target'. This means that for every added field an array is allocated on the CPU and the GPU. This is a useful default; for more fine-grained control, the `add_array` method also takes additional parameters controlling where the array should be allocated.
Additionally we also need to compile a GPU version of the kernel.
%% Cell type:code id: tags:
``` python
if gpu is False:
dh = ps.create_data_handling(domain_size=(30, 30), default_target=ps.Target.CPU)
else:
dh = ps.create_data_handling(domain_size=(30, 30), default_target=ps.Target.GPU)
# fields are now created using the data handling
src_field = dh.add_array('src', values_per_cell=1)
dst_field = dh.add_array('dst', values_per_cell=1)
# kernel is created just like before
update_rule = [ps.Assignment(lhs=dst_field[0, 0],
rhs=(src_field[1, 0] + src_field[-1, 0] + src_field[0, 1] + src_field[0, -1]) / 4)]
config = ps.CreateKernelConfig(target=dh.default_target)
kernel_function = ps.create_kernel(update_rule, config=config).compile()
dh.fill('src', 0.0)
```
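The fine-grained control mentioned above could look roughly like this. This is only a sketch: the `cpu` and `gpu` keyword arguments of `add_array` are an assumption about its signature and may differ between versions.

``` python
# Sketch only - the `cpu`/`gpu` keywords are assumed, check the add_array documentation.
f_cpu_only = dh.add_array('f_cpu_only', values_per_cell=1, gpu=False)           # CPU array only
f_mirrored = dh.add_array('f_mirrored', values_per_cell=1, cpu=True, gpu=True)  # CPU and GPU copy
```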
%% Cell type:markdown id: tags:
The data handling provides functions to transfer data between CPU and GPU:
%% Cell type:code id: tags:
``` python
if gpu:
dh.to_gpu('src')
dh.run_kernel(kernel_function)
if gpu:
dh.to_cpu('dst')
```
%% Cell type:markdown id: tags:
Usually one wants to transfer all fields that have been allocated on CPU and GPU at the same time:
%% Cell type:code id: tags:
``` python
dh.all_to_gpu()
dh.run_kernel(kernel_function)
dh.all_to_cpu()
```
%% Cell type:markdown id: tags:
We can always include the `all_to_*` functions in our code, since they do nothing if there are no arrays allocated on the GPU. Thus there is only a single point in the code where we can switch between CPU and GPU version: the `default_target` of the data handling.
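As a condensed sketch of the cells above, the whole CPU/GPU decision then collapses into a single line; the `use_gpu` flag is introduced here purely for illustration.

``` python
from pystencils.session import *

use_gpu = False   # the only switch between the CPU and the GPU version

dh = ps.create_data_handling(domain_size=(30, 30),
                             default_target=ps.Target.GPU if use_gpu else ps.Target.CPU)
src_field = dh.add_array('src', values_per_cell=1)
dst_field = dh.add_array('dst', values_per_cell=1)

update_rule = [ps.Assignment(dst_field[0, 0],
                             (src_field[1, 0] + src_field[-1, 0] +
                              src_field[0, 1] + src_field[0, -1]) / 4)]
config = ps.CreateKernelConfig(target=dh.default_target)
kernel_function = ps.create_kernel(update_rule, config=config).compile()

dh.fill('src', 1.0)
dh.all_to_gpu()                # does nothing if no GPU arrays were allocated
dh.run_kernel(kernel_function)
dh.all_to_cpu()
```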
%% Cell type:markdown id: tags:
### 2.2 Ghost Layers and periodicity
The data handling can also provide periodic boundary conditions. For this, the domain is extended by one layer of cells, the so-called ghost or halo layer.
%% Cell type:code id: tags:
``` python
print("Shape of domain ", dh.shape)
print("Direct access to arrays ", dh.cpu_arrays['src'].shape)
print("Gather ", dh.gather_array('src', ghost_layers=True).shape)
```
%% Output
Shape of domain (30, 30)
Direct access to arrays (32, 32)
Gather (32, 32)
%% Cell type:markdown id: tags:
So the actual arrays are 2 cells larger than what you asked for. This additional layer is used to copy over the data from the other end of the array, such that for the stencil algorithm the domain is effectively periodic. This copying operation has to be started manually though:
%% Cell type:code id: tags:
``` python
dh = ps.create_data_handling((4, 4), periodicity=(True, False))
dh.add_array("src")
# get copy function
copy_fct = dh.synchronization_function(['src'])
dh.fill('src', 0.0, ghost_layers=True)
dh.fill('src', 3.0, ghost_layers=False)
before_sync = dh.gather_array('src', ghost_layers=True).copy()
copy_fct() # copy over to get periodicity in x direction
after_sync = dh.gather_array('src', ghost_layers=True).copy()
plt.subplot(1,2,1)
plt.scalar_field(before_sync);
plt.title("Before")
plt.subplot(1,2,2)
plt.scalar_field(after_sync);
plt.title("After");
```
%% Output
%% Cell type:markdown id: tags:
## 3. Going (MPI) parallel - the parallel data handling
### 3.1. Conceptual overview
To run MPI parallel simulations the waLBerla framework is used. waLBerla has to be compiled against your local MPI library and is thus a bit hard to install. We suggest using the version shipped with conda for testing. For production, when you want to run on a cluster, the best option is to build waLBerla yourself against the MPI library that is installed on your cluster.
Now let's have a look at how to write code that runs MPI parallel.
Since the data is distributed, we don't have access to the full array any more but only to the part that is stored locally. The domain is split into so-called blocks, where one process might get one (or sometimes multiple) blocks. To do anything with the local part of the data we iterate over the **local** blocks to get the contents as numpy arrays. The blocks returned in the loop differ from process to process.
Copy the following snippet to a Python file and run it with multiple processes, e.g. ``mpirun -np 4 myscript.py``; you will see that there are as many blocks as processes.
%% Cell type:code id: tags:
``` python
dh = ps.create_data_handling(domain_size=(30, 30), parallel=True)
field = dh.add_array('field')
for block in dh.iterate():
# offset is in global coordinates, where the first inner cell has coordinate (0,0)
# and ghost layers have negative coordinates
print(block.offset, block['field'].shape)
# use offset to create a local array 'my_data' for the part of the domain
#np.copyto(block[field.name], my_data)
```
%% Output
(-1, -1) (32, 32)
[0][PROGRESS]------(0.000 sec) Initializing SetupBlockForest:
[0] - AABB: [ <0,0,0>, <30,30,1> ]
[0] - forest size (root blocks / blocks on the initial grid): 1 x 1 x 1
[0] - periodicity: false x false x false
[0][PROGRESS]------(0.000 sec) Initializing SetupBlockForest: Allocating root blocks ...
[0][PROGRESS]------(0.000 sec) Initializing SetupBlockForest: Setting up neighborhood information for each block (with respect to periodicity) ...
[0][PROGRESS]------(0.000 sec) Initializing SetupBlockForest: Assigning workload, memory requirements, and SUIDs to blocks ...
[0][PROGRESS]------(0.000 sec) Initializing SetupBlockForest: finished!
[0] The following block structure has been created:
[0] - AABB: [ <0,0,0>, <30,30,1> ]
[0] - initial decomposition: 1 x 1 x 1 (= forest size)
[0] - periodicity: false x false x false
[0] - number of blocks discarded from the initial grid: 0 (= 0 %)
[0] - number of levels: 1
[0] - tree ID digits: 1 (-> block ID bytes = 1)
[0] - total number of blocks: 1
[0] - blocks have not yet been distributed to processes
[0][PROGRESS]------(0.000 sec) Balancing SetupBlockForest: Creating a process distribution for 1 process(es) ...
[0][PROGRESS]------(0.000 sec) Balancing SetupBlockForest: process distribution to 1 process(es) finished!
[0] - number of worker processes: 1
[0] - number of empty buffer processes: 0
[0] - buffer processes are inserted into the process network: no
[0] The resulting block structure looks like as follows:
[0] - AABB: [ <0,0,0>, <30,30,1> ]
[0] - initial decomposition: 1 x 1 x 1 (= forest size)
[0] - periodicity: false x false x false
[0] - number of blocks discarded from the initial grid: 0 (= 0 %)
[0] - number of levels: 1
[0] - tree ID digits: 1 (-> block ID bytes = 1)
[0] - total number of blocks: 1
[0] - number of processes: 1 (1 worker process(es) / 0 empty buffer process(es))
[0] - buffer processes are inserted into the process network: no
[0] - process ID bytes: 0
[0] - blocks/memory/workload per process:
[0] + blocks:
[0] - min = 1
[0] - max = 1
[0] - avg = 1
[0] - stdDev = 0
[0] - relStdDev = 0
[0] + memory:
[0] - min = 1
[0] - max = 1
[0] - avg = 1
[0] - stdDev = 0
[0] - relStdDev = 0
[0] + workload:
[0] - min = 1
[0] - max = 1
[0] - avg = 1
[0] - stdDev = 0
[0] - relStdDev = 0
[0][PROGRESS]------(0.000 sec) Adding block data ("cell bounding box")
[0][PROGRESS]------(0.000 sec) Adding block data ("field")
%% Cell type:markdown id: tags:
To get some more interesting results here in the notebook we put multiple blocks onto our single notebook process. This does not make much sense for real simulations, but it is useful for testing and demonstration purposes.
%% Cell type:code id: tags:
``` python
from waLBerla import createUniformBlockGrid
from pystencils.datahandling import ParallelDataHandling
blocks = createUniformBlockGrid(blocks=(2,1,2), cellsPerBlock=(20, 10, 20),
oneBlockPerProcess=False, periodic=(1, 0, 0))
dh = ParallelDataHandling(blocks)
field = dh.add_array('field')
for block in dh.iterate():
print(block.offset, block['field'].shape)
```
%% Output
(-1, -1, -1) (22, 12, 22)
(-1, -1, -1)[0][PROGRESS]------(0.025 sec) Initializing SetupBlockForest:
[0] - AABB: [ <0,0,0>, <40,10,40> ]
[0] - forest size (root blocks / blocks on the initial grid): 2 x 1 x 2
[0] - periodicity: true x false x false
(22, 12, 22)
(19, -1, -1) (22, 12, 22)
(-1, -1, 19) (22, 12, 22)
(19, -1, 19) (22, 12, 22)
[0][PROGRESS]------(0.025 sec) Initializing SetupBlockForest: Allocating root blocks ...
[0][PROGRESS]------(0.025 sec) Initializing SetupBlockForest: Setting up neighborhood information for each block (with respect to periodicity) ...
[0][PROGRESS]------(0.025 sec) Initializing SetupBlockForest: Assigning workload, memory requirements, and SUIDs to blocks ...
[0][PROGRESS]------(0.025 sec) Initializing SetupBlockForest: finished!
[0] The following block structure has been created:
[0] - AABB: [ <0,0,0>, <40,10,40> ]
[0] - initial decomposition: 2 x 1 x 2 (= forest size)
[0] - periodicity: true x false x false
[0] - number of blocks discarded from the initial grid: 0 (= 0 %)
[0] - number of levels: 1
[0] - tree ID digits: 3 (-> block ID bytes = 1)
[0] - total number of blocks: 4
[0] - blocks have not yet been distributed to processes
[0][PROGRESS]------(0.025 sec) Balancing SetupBlockForest: Creating a process distribution for 1 process(es) ...
[0][PROGRESS]------(0.025 sec) Balancing SetupBlockForest: process distribution to 1 process(es) finished!
[0] - number of worker processes: 1
[0] - number of empty buffer processes: 0
[0] - buffer processes are inserted into the process network: no
[0] The resulting block structure looks like as follows:
[0] - AABB: [ <0,0,0>, <40,10,40> ]
[0] - initial decomposition: 2 x 1 x 2 (= forest size)
[0] - periodicity: true x false x false
[0] - number of blocks discarded from the initial grid: 0 (= 0 %)
[0] - number of levels: 1
[0] - tree ID digits: 3 (-> block ID bytes = 1)
[0] - total number of blocks: 4
[0] - number of processes: 1 (1 worker process(es) / 0 empty buffer process(es))
[0] - buffer processes are inserted into the process network: no
[0] - process ID bytes: 0
[0] - blocks/memory/workload per process:
[0] + blocks:
[0] - min = 4
[0] - max = 4
[0] - avg = 4
[0] - stdDev = 0
[0] - relStdDev = 0
[0] + memory:
[0] - min = 4
[0] - max = 4
[0] - avg = 4
[0] - stdDev = 0
[0] - relStdDev = 0
[0] + workload:
[0] - min = 4
[0] - max = 4
[0] - avg = 4
[0] - stdDev = 0
[0] - relStdDev = 0
[0][PROGRESS]------(0.025 sec) Adding block data ("cell bounding box")
[0][PROGRESS]------(0.026 sec) Adding block data ("field")
%% Cell type:markdown id: tags:
Now we see that we have four blocks of (20, 10, 20) cells each, and that the global domain is (40, 10, 40) cells big.
All subblocks also have a ghost layer around them, which is used to synchronize with their neighboring blocks (over the network). For ghost layer synchronization the same `synchronization_function` is used that we used above for periodic boundaries, because copying between blocks and copying the ghost layer for periodicity use the same mechanism.
%% Cell type:code id: tags:
``` python
dh.gather_array('field').shape
```
%% Output
$\displaystyle \left( 40, \ 10, \ 40\right)$
(40, 10, 40)
%% Cell type:markdown id: tags:
### 3.2. Parallel example
To illustrate this in more detail, let's run a simple kernel on a parallel domain. waLBerla can handle 3D domains only, so we choose a z-size of 1.
%% Cell type:code id: tags:
``` python
blocks = createUniformBlockGrid(blocks=(2,4,1), cellsPerBlock=(20, 10, 1),
oneBlockPerProcess=False, periodic=(1, 1, 0))
dh = ParallelDataHandling(blocks, dim=2)
src_field = dh.add_array('src')
dst_field = dh.add_array('dst')
update_rule = [ps.Assignment(lhs=dst_field[0, 0 ],
rhs=(src_field[1, 0] + src_field[-1, 0] +
src_field[0, 1] + src_field[0, -1]) / 4)]
# 3. compile update rule to function
kernel_function = ps.create_kernel(update_rule).compile()
```
%% Output
[0][PROGRESS]------(0.119 sec) Initializing SetupBlockForest:
[0] - AABB: [ <0,0,0>, <40,40,1> ]
[0] - forest size (root blocks / blocks on the initial grid): 2 x 4 x 1
[0] - periodicity: true x true x false
[0][PROGRESS]------(0.119 sec) Initializing SetupBlockForest: Allocating root blocks ...
[0][PROGRESS]------(0.119 sec) Initializing SetupBlockForest: Setting up neighborhood information for each block (with respect to periodicity) ...
[0][PROGRESS]------(0.119 sec) Initializing SetupBlockForest: Assigning workload, memory requirements, and SUIDs to blocks ...
[0][PROGRESS]------(0.119 sec) Initializing SetupBlockForest: finished!
[0] The following block structure has been created:
[0] - AABB: [ <0,0,0>, <40,40,1> ]
[0] - initial decomposition: 2 x 4 x 1 (= forest size)
[0] - periodicity: true x true x false
[0] - number of blocks discarded from the initial grid: 0 (= 0 %)
[0] - number of levels: 1
[0] - tree ID digits: 4 (-> block ID bytes = 1)
[0] - total number of blocks: 8
[0] - blocks have not yet been distributed to processes
[0][PROGRESS]------(0.119 sec) Balancing SetupBlockForest: Creating a process distribution for 1 process(es) ...
[0][PROGRESS]------(0.119 sec) Balancing SetupBlockForest: process distribution to 1 process(es) finished!
[0] - number of worker processes: 1
[0] - number of empty buffer processes: 0
[0] - buffer processes are inserted into the process network: no
[0] The resulting block structure looks like as follows:
[0] - AABB: [ <0,0,0>, <40,40,1> ]
[0] - initial decomposition: 2 x 4 x 1 (= forest size)
[0] - periodicity: true x true x false
[0] - number of blocks discarded from the initial grid: 0 (= 0 %)
[0] - number of levels: 1
[0] - tree ID digits: 4 (-> block ID bytes = 1)
[0] - total number of blocks: 8
[0] - number of processes: 1 (1 worker process(es) / 0 empty buffer process(es))
[0] - buffer processes are inserted into the process network: no
[0] - process ID bytes: 0
[0] - blocks/memory/workload per process:
[0] + blocks:
[0] - min = 8
[0] - max = 8
[0] - avg = 8
[0] - stdDev = 0
[0] - relStdDev = 0
[0] + memory:
[0] - min = 8
[0] - max = 8
[0] - avg = 8
[0] - stdDev = 0
[0] - relStdDev = 0
[0] + workload:
[0] - min = 8
[0] - max = 8
[0] - avg = 8
[0] - stdDev = 0
[0] - relStdDev = 0
[0][PROGRESS]------(0.119 sec) Adding block data ("cell bounding box")
[0][PROGRESS]------(0.119 sec) Adding block data ("src")
[0][PROGRESS]------(0.120 sec) Adding block data ("dst")
%% Cell type:markdown id: tags:
Now let's initialize the arrays. To do this, we can get coordinate arrays (meshgrids) for the local cells from each block. We use these to initialize a circular shape.
%% Cell type:code id: tags:
``` python
dh.fill('src', 0.0)
for block in dh.iterate(ghost_layers=False, inner_ghost_layers=False):
x, y = block.midpoint_arrays
inside_sphere = (x -20)**2 + (y-25)**2 < 8 ** 2
block['src'][inside_sphere] = 1.0
plt.scalar_field( dh.gather_array('src') );
```
%% Output
%% Cell type:markdown id: tags:
Now we can run our compute kernel on this data as usual. We just have to make sure that the field is synchronized after every step, i.e. that the ghost layers are correctly updated.
%% Cell type:code id: tags:
``` python
sync = dh.synchronization_function(['src'])
for i in range(40):
sync()
dh.run_kernel(kernel_function)
dh.swap('src', 'dst')
```
%% Cell type:code id: tags:
``` python
plt.scalar_field( dh.gather_array('src') );
```
%% Output
%% Cell type:code id: tags:
``` python
from pystencils.session import *
```
%% Cell type:markdown id: tags:
# Demo: Assignment collections and simplification
## Assignment collections
The assignment collection class helps to formulate and simplify assignments for numerical kernels.
An ``AssignmentCollection`` is an ordered collection of assignments, together with an optional ordered collection of subexpressions that are required to evaluate the main assignments. There are various simplification rules available that operate on ``AssignmentCollection``s.
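As a minimal illustration (the symbols and assignments below are made up for this sketch and not used later), such a collection can be built directly from plain *sympy* symbols:

``` python
x, y, t1 = sp.symbols("x y t1")
sketch = ps.AssignmentCollection([ps.Assignment(y, 2 * t1 + x)],
                                 subexpressions=[ps.Assignment(t1, x**2)])
```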
%% Cell type:markdown id: tags:
We start by defining a stencil update rule. Here we also use the *pystencils* ``Field``; note, however, that the assignment collection module works purely on the *sympy* level.
%% Cell type:code id: tags:
``` python
a,b,c = sp.symbols("a b c")
f = ps.fields("f(2) : [2D]")
g = ps.fields("g(2) : [2D]")
a1 = ps.Assignment(g[0,0](1), (a**2 +b) * f[0,1] + \
(a**2 - c) * f[1,0] + \
(a**2 - 2*c) * f[-1,0] + \
(a**2) * f[0, -1])
a2 = ps.Assignment(g[0,0](0), (c**2 +b) * f[0,1] + \
(c**2 - c) * f[1,0] + \
(c**2 - 2*c) * f[-1,0] + \
(c**2 - a**2) * f[0, -1])
ac = ps.AssignmentCollection([a1, a2], subexpressions=[])
ac
```
%% Output
AssignmentCollection: g_C^0, g_C^1 <- f(f_N^0, b, f_S^0, f_E^0, a, f_W^0, c)
%% Cell type:markdown id: tags:
*sympy* operations can be applied to an assignment collection: in this example we first expand the collection and then look for common subexpressions.
%% Cell type:code id: tags:
``` python
expand_all = ps.simp.apply_to_all_assignments(sp.expand)
expandedEc = expand_all(ac)
```
%% Cell type:code id: tags:
``` python
ac_cse = ps.simp.sympy_cse(expandedEc)
ac_cse
```
%% Output
AssignmentCollection: g_C^0, g_C^1 <- f(f_N^0, b, f_S^0, f_E^0, a, f_W^0, c)
%% Cell type:markdown id: tags:
Symbols occurring in assignment collections are classified into three categories:
- ``free_symbols``: symbols that occur on right-hand sides but never on left-hand sides
- ``bound_symbols``: symbols that occur on left-hand sides
- ``defined_symbols``: symbols that occur on the left-hand side of a main assignment
%% Cell type:code id: tags:
``` python
ac_cse.free_symbols
```
%% Output
$\displaystyle \left\{{f}_{(1,0)}^{0}, {f}_{(0,1)}^{0}, {f}_{(0,-1)}^{0}, {f}_{(-1,0)}^{0}, a, b, c\right\}$
{f_E__0, f_N__0, f_S__0, f_W__0, a, b, c}
%% Cell type:code id: tags:
``` python
ac_cse.bound_symbols
```
%% Output
$\displaystyle \left\{{g}_{(0,0)}^{0}, {g}_{(0,0)}^{1}, \xi_{0}, \xi_{1}, \xi_{2}, \xi_{3}\right\}$
{g_C__0, g_C__1, ξ₀, ξ₁, ξ₂, ξ₃}
%% Cell type:code id: tags:
``` python
ac_cse.defined_symbols
```
%% Output
$\displaystyle \left\{{g}_{(0,0)}^{0}, {g}_{(0,0)}^{1}\right\}$
{g_C__0, g_C__1}
%% Cell type:markdown id: tags:
Assignment collections can be split up and merged together. For splitting, a list of symbols that occur on the left-hand side of the main assignments has to be passed. The returned assignment collection contains only these main assignments, together with all necessary subexpressions.
%% Cell type:code id: tags:
``` python
ac_f0 = ac_cse.new_filtered([g(0)])
ac_f1 = ac_cse.new_filtered([g(1)])
ac_f1
```
%% Output
AssignmentCollection: g_C^1, <- f(f_N^0, b, f_S^0, f_E^0, a, f_W^0, c)
%% Cell type:markdown id: tags:
Note here that subexpressions which are not used in the main assignment of $g_C^1$ are no longer part of the filtered collection.
If we merge both collections together, we end up with the original collection.
%% Cell type:code id: tags:
``` python
ac_f0.new_merged(ac_f1)
```
%% Output
AssignmentCollection: g_C^0, g_C^1 <- f(f_N^0, b, f_S^0, f_E^0, a, f_W^0, c)
%% Cell type:markdown id: tags:
There is also a method that inserts all subexpressions into the main assignments. This is the inverse operation of common subexpression elimination.
%% Cell type:code id: tags:
``` python
assert sp.simplify(ac_f0.new_without_subexpressions().main_assignments[0].rhs - a2.rhs) == 0
ac_f0.new_without_subexpressions()
```
%% Output
AssignmentCollection: g_C^0, <- f(f_N^0, b, f_S^0, f_E^0, a, f_W^0, c)
%% Cell type:markdown id: tags:
To evaluate an assignment collection, use the ``lambdify`` method. It is very similar to *sympy*'s ``lambdify`` function.
%% Cell type:code id: tags:
``` python
evalFct = ac_cse.lambdify([f[0,1], f[1,0]], # new parameters of returned function
fixed_symbols={a:1, b:2, c:3, f[0,-1]: 4, f[-1,0]: 5}) # fix values of other symbols
evalFct(2,1)
```
%% Output
$\displaystyle \left\{ {g}_{(0,0)}^{0} : 75, \ {g}_{(0,0)}^{1} : -17\right\}$
{g_C__0: 75, g_C__1: -17}
%% Cell type:markdown id: tags:
``lambdify`` is rather slow for evaluation. The intended way to evaluate an assignment collection with *pystencils* is to create a fast kernel that applies the update at every site of a structured grid. The collection can be passed directly to the `create_kernel` function.
%% Cell type:code id: tags:
``` python
func = ps.create_kernel(ac_cse).compile()
```
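The compiled kernel is then called with *numpy* arrays for the fields and values for the free symbols. The array shape and parameter values below are only assumptions for this sketch; any compatible shapes work:

``` python
# usage sketch: shapes and parameter values are assumptions
f_arr = np.random.rand(32, 32, 2)   # field f(2) on a 2D domain
g_arr = np.zeros_like(f_arr)        # field g(2), written by the kernel
func(f=f_arr, g=g_arr, a=1.0, b=2.0, c=3.0)
```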
%% Cell type:markdown id: tags:
## Simplification Strategies
In the examples above, we already applied simplification rules to assignment collections. A simplification rule is a function that takes an assignment collection as its single argument and returns a modified/simplified copy of it. The ``SimplificationStrategy`` class holds a list of simplification rules and can apply all of them in the specified order. Additionally, it provides useful printing and reporting functions.
We start by creating a simplification strategy, consisting of the expand and CSE simplifications we have already applied above:
%% Cell type:code id: tags:
``` python
strategy = ps.simp.SimplificationStrategy()
strategy.add(ps.simp.apply_to_all_assignments(sp.expand))
strategy.add(ps.simp.sympy_cse)
```
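Since a rule is just a function mapping one assignment collection to another, custom rules can be added in the same way. The `sp.factor` rule below is only an illustration and is not part of the strategy used in the rest of this notebook:

``` python
# hypothetical extra rule: factor all right-hand sides before running CSE
factor_all = ps.simp.apply_to_all_assignments(sp.factor)
custom_strategy = ps.simp.SimplificationStrategy()
custom_strategy.add(factor_all)
custom_strategy.add(ps.simp.sympy_cse)
```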
%% Cell type:markdown id: tags:
This strategy can be applied to any assignment collection:
%% Cell type:code id: tags:
``` python
strategy(ac)
```
%% Output
AssignmentCollection: g_C^0, g_C^1 <- f(f_N^0, b, f_S^0, f_E^0, a, f_W^0, c)
%% Cell type:markdown id: tags:
The strategy can also print the simplification results at each stage.
The report contains information about the number of operations after each simplification as well as the runtime of each simplification routine.
%% Cell type:code id: tags:
``` python
strategy.create_simplification_report(ac)
```
%% Output
<pystencils.simp.simplificationstrategy.SimplificationStrategy.create_simplification_report.<locals>.Report at 0x147de3e90>
%% Cell type:markdown id: tags:
The strategy can also print the full collection after each simplification...
%% Cell type:code id: tags:
``` python
strategy.show_intermediate_results(ac)
```
%% Output
<pystencils.simp.simplificationstrategy.SimplificationStrategy.show_intermediate_results.<locals>.IntermediateResults at 0x147e09c90>
%% Cell type:markdown id: tags:
... or only specific assignments for better readability
%% Cell type:code id: tags:
``` python
strategy.show_intermediate_results(ac, symbols=[g(1)])
```
%% Output
<pystencils.simp.simplificationstrategy.SimplificationStrategy.show_intermediate_results.<locals>.IntermediateResults at 0x1265a1b90>
%% Cell type:code id: tags:
``` python
```
@@ -11,11 +11,11 @@ Creating kernels
.. autoclass:: pystencils.CreateKernelConfig
:members:
.. autofunction:: pystencils.create_domain_kernel
.. autofunction:: pystencils.kernelcreation.create_domain_kernel
.. autofunction:: pystencils.create_indexed_kernel
.. autofunction:: pystencils.kernelcreation.create_indexed_kernel
.. autofunction:: pystencils.create_staggered_kernel
.. autofunction:: pystencils.kernelcreation.create_staggered_kernel
Code printing
@@ -27,11 +27,11 @@ Code printing
GPU Indexing
-------------
.. autoclass:: pystencils.gpucuda.AbstractIndexing
.. autoclass:: pystencils.gpu.AbstractIndexing
:members:
.. autoclass:: pystencils.gpucuda.BlockIndexing
.. autoclass:: pystencils.gpu.BlockIndexing
:members:
.. autoclass:: pystencils.gpucuda.LineIndexing
.. autoclass:: pystencils.gpu.LineIndexing
:members:
[project]
name = "pystencils"
description = "Speeding up stencil computations on CPUs and GPUs"
dynamic = ["version"]
readme = "README.md"
authors = [
{ name = "Martin Bauer" },
{ name = "Jan Hönig " },
{ name = "Markus Holzer" },
{ name = "Frederik Hennig" },
{ email = "cs10-codegen@fau.de" },
]
license = { file = "COPYING.txt" }
requires-python = ">=3.10"
dependencies = ["sympy>=1.9,<=1.12.1", "numpy>=1.8.0", "appdirs", "joblib", "pyyaml", "fasteners"]
classifiers = [
"Development Status :: 4 - Beta",
"Framework :: Jupyter",
"Topic :: Software Development :: Code Generators",
"Topic :: Scientific/Engineering :: Physics",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
]
[project.urls]
"Bug Tracker" = "https://i10git.cs.fau.de/pycodegen/pystencils/-/issues"
"Documentation" = "https://pycodegen.pages.i10git.cs.fau.de/pystencils/"
"Source Code" = "https://i10git.cs.fau.de/pycodegen/pystencils"
[project.optional-dependencies]
gpu = ['cupy']
alltrafos = ['islpy', 'py-cpuinfo']
bench_db = ['blitzdb', 'pymongo', 'pandas']
interactive = [
'matplotlib',
'ipy_table',
'imageio',
'jupyter',
'pyevtk',
'rich',
'graphviz',
]
use_cython = [
'Cython'
]
doc = [
'sphinx',
'sphinx_rtd_theme',
'nbsphinx',
'sphinxcontrib-bibtex',
'sphinx_autodoc_typehints',
'pandoc',
]
tests = [
'pytest',
'pytest-cov',
'pytest-html',
'ansi2html',
'pytest-xdist',
'flake8',
'nbformat',
'nbconvert',
'ipython',
'matplotlib',
'py-cpuinfo',
'randomgen>=1.18',
]
[build-system]
requires = [
"setuptools>=61",
"versioneer[toml]>=0.29",
# 'Cython'
]
build-backend = "setuptools.build_meta"
[tool.setuptools.package-data]
pystencils = [
"include/*.h",
"boundaries/createindexlistcython.pyx"
]
[tool.setuptools.packages.find]
where = ["src"]
include = ["pystencils", "pystencils.*"]
namespaces = false
[tool.versioneer]
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
VCS = "git"
style = "pep440"
versionfile_source = "src/pystencils/_version.py"
versionfile_build = "pystencils/_version.py"
tag_prefix = "release/"
parentdir_prefix = "pystencils-"
__prof_trigger
printf
__syncthreads
__syncthreads_count
__syncthreads_and
__syncthreads_or
__syncwarp
__threadfence
__threadfence_block
__threadfence_system
atomicAdd
atomicSub
atomicExch
atomicMin
atomicMax
atomicInc
atomicDec
atomicAnd
atomicOr
atomicXor
atomicCAS
__all_sync
__any_sync
__ballot_sync
__active_mask
__shfl_sync
__shfl_up_sync
__shfl_down_sync
__shfl_xor_sync
__match_any_sync
__match_all_sync
__isGlobal
__isShared
__isConstant
__isLocal
tex1Dfetch
tex1D
tex2D
tex3D
sqrtf
rsqrtf
cbrtf
rcbrtf
hypotf
rhypotf
norm3df
rnorm3df
norm4df
rnorm4df
normf
rnormf
expf
exp2f
exp10f
expm1f
logf
log2f
log10f
log1pf
sinf
cosf
tanf
sincosf
sinpif
cospif
sincospif
asinf
acosf
atanf
atan2f
sinhf
coshf
tanhf
asinhf
acoshf
atanhf
powf
erff
erfcf
erfinvf
erfcinvf
erfcxf
normcdff
normcdfinvf
lgammaf
tgammaf
fmaf
frexpf
ldexpf
scalbnf
scalblnf
logbf
ilogbf
j0f
j1f
jnf
y0f
y1f
ynf
cyl_bessel_i0f
cyl_bessel_i1f
fmodf
remainderf
remquof
modff
fdimf
truncf
roundf
rintf
nearbyintf
ceilf
floorf
lrintf
lroundf
llrintf
llroundf
sqrt
rsqrt
cbrt
rcbrt
hypot
rhypot
norm3d
rnorm3d
norm4d
rnorm4d
norm
rnorm
exp
exp2
exp10
expm1
log
log2
log10
log1p
sin
cos
tan
sincos
sinpi
cospi
sincospi
asin
acos
atan
atan2
sinh
cosh
tanh
asinh
acosh
atanh
pow
erf
erfc
erfinv
erfcinv
erfcx
normcdf
normcdfinv
lgamma
tgamma
fma
frexp
ldexp
scalbn
scalbln
logb
ilogb
j0
j1
jn
y0
y1
yn
cyl_bessel_i0
cyl_bessel_i1
fmod
remainder
remquo
mod
fdim
trunc
round
rint
nearbyint
ceil
floor
lrint
lround
llrint
llround
__fdividef
__sinf
__cosf
__tanf
__sincosf
__logf
__log2f
__log10f
__expf
__exp10f
__powf
__fadd_rn
__fsub_rn
__fmul_rn
__fmaf_rn
__frcp_rn
__fsqrt_rn
__frsqrt_rn
__fdiv_rn
__fadd_rz
__fsub_rz
__fmul_rz
__fmaf_rz
__frcp_rz
__fsqrt_rz
__frsqrt_rz
__fdiv_rz
__fadd_ru
__fsub_ru
__fmul_ru
__fmaf_ru
__frcp_ru
__fsqrt_ru
__frsqrt_ru
__fdiv_ru
__fadd_rd
__fsub_rd
__fmul_rd
__fmaf_rd
__frcp_rd
__fsqrt_rd
__frsqrt_rd
__fdiv_rd
__fdividef
__expf
__exp10f
__logf
__log2f
__log10f
__sinf
__cosf
__sincosf
__tanf
__powf
__dadd_rn
__dsub_rn
__dmul_rn
__fma_rn
__ddiv_rn
__drcp_rn
__dsqrt_rn
__dadd_rz
__dsub_rz
__dmul_rz
__fma_rz
__ddiv_rz
__drcp_rz
__dsqrt_rz
__dadd_ru
__dsub_ru
__dmul_ru
__fma_ru
__ddiv_ru
__drcp_ru
__dsqrt_ru
__dadd_rd
__dsub_rd
__dmul_rd
__fma_rd
__ddiv_rd
__drcp_rd
__dsqrt_rd
import ctypes
from collections import defaultdict
from functools import partial
from typing import Tuple
import numpy as np
import sympy as sp
import sympy.codegen.ast
from sympy.core.cache import cacheit
from sympy.logic.boolalg import Boolean, BooleanFunction
import pystencils
from pystencils.cache import memorycache, memorycache_if_hashable
from pystencils.utils import all_equal
def typed_symbols(names, dtype, *args):
symbols = sp.symbols(names, *args)
if isinstance(symbols, Tuple):
return tuple(TypedSymbol(str(s), dtype) for s in symbols)
else:
return TypedSymbol(str(symbols), dtype)
def type_all_numbers(expr, dtype):
substitutions = {a: cast_func(a, dtype) for a in expr.atoms(sp.Number)}
return expr.subs(substitutions)
def matrix_symbols(names, dtype, rows, cols):
if isinstance(names, str):
names = names.replace(' ', '').split(',')
matrices = []
for n in names:
symbols = typed_symbols(f"{n}:{rows * cols}", dtype)
matrices.append(sp.Matrix(rows, cols, lambda i, j: symbols[i * cols + j]))
return tuple(matrices)
def assumptions_from_dtype(dtype):
"""Derives SymPy assumptions from :class:`BasicType` or a Numpy dtype
Args:
dtype (BasicType, np.dtype): a Numpy data type
Returns:
A dict of SymPy assumptions
"""
if hasattr(dtype, 'numpy_dtype'):
dtype = dtype.numpy_dtype
assumptions = dict()
try:
if np.issubdtype(dtype, np.integer):
assumptions.update({'integer': True})
if np.issubdtype(dtype, np.unsignedinteger):
assumptions.update({'negative': False})
if np.issubdtype(dtype, np.integer) or \
np.issubdtype(dtype, np.floating):
assumptions.update({'real': True})
except Exception:
pass
return assumptions
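# Illustrative example: assumptions_from_dtype(np.dtype('uint32')) yields
# {'integer': True, 'negative': False, 'real': True}, since unsigned integers
# are integer-valued, non-negative and real.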
# noinspection PyPep8Naming
class address_of(sp.Function):
is_Atom = True
def __new__(cls, arg):
obj = sp.Function.__new__(cls, arg)
return obj
@property
def canonical(self):
if hasattr(self.args[0], 'canonical'):
return self.args[0].canonical
else:
raise NotImplementedError()
@property
def is_commutative(self):
return self.args[0].is_commutative
@property
def dtype(self):
if hasattr(self.args[0], 'dtype'):
return PointerType(self.args[0].dtype, restrict=True)
else:
return PointerType('void', restrict=True)
# noinspection PyPep8Naming
class cast_func(sp.Function):
is_Atom = True
def __new__(cls, *args, **kwargs):
if len(args) != 2:
pass
expr, dtype, *other_args = args
if not isinstance(dtype, Type):
dtype = create_type(dtype)
# to work in conditions of sp.Piecewise cast_func has to be of type Boolean as well
# however, a cast_function should only be a boolean if its argument is a boolean, otherwise this leads
# to problems when for example comparing cast_func's for equality
#
# lhs = bitwise_and(a, cast_func(1, 'int'))
# rhs = cast_func(0, 'int')
# print( sp.Ne(lhs, rhs) ) # would give true if all cast_funcs are booleans
# -> thus a separate class boolean_cast_func is introduced
if isinstance(expr, Boolean) and (not isinstance(expr, TypedSymbol) or expr.dtype == BasicType(bool)):
cls = boolean_cast_func
return sp.Function.__new__(cls, expr, dtype, *other_args, **kwargs)
@property
def canonical(self):
if hasattr(self.args[0], 'canonical'):
return self.args[0].canonical
else:
raise NotImplementedError()
@property
def is_commutative(self):
return self.args[0].is_commutative
def _eval_evalf(self, *args, **kwargs):
return self.args[0].evalf()
@property
def dtype(self):
return self.args[1]
@property
def is_integer(self):
"""
Uses Numpy type hierarchy to determine :func:`sympy.Expr.is_integer` predicate
For reference: Numpy type hierarchy https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.scalars.html
"""
if hasattr(self.dtype, 'numpy_dtype'):
return np.issubdtype(self.dtype.numpy_dtype, np.integer) or super().is_integer
else:
return super().is_integer
@property
def is_negative(self):
"""
See :func:`.TypedSymbol.is_integer`
"""
if hasattr(self.dtype, 'numpy_dtype'):
if np.issubdtype(self.dtype.numpy_dtype, np.unsignedinteger):
return False
return super().is_negative
@property
def is_nonnegative(self):
"""
See :func:`.TypedSymbol.is_integer`
"""
if self.is_negative is False:
return True
else:
return super().is_nonnegative
@property
def is_real(self):
"""
See :func:`.TypedSymbol.is_integer`
"""
if hasattr(self.dtype, 'numpy_dtype'):
return np.issubdtype(self.dtype.numpy_dtype, np.integer) or \
np.issubdtype(self.dtype.numpy_dtype, np.floating) or \
super().is_real
else:
return super().is_real
# noinspection PyPep8Naming
class boolean_cast_func(cast_func, Boolean):
pass
# noinspection PyPep8Naming
class vector_memory_access(cast_func):
# Arguments are: read/write expression, type, aligned, nontemporal, mask (or none), stride
nargs = (6,)
# noinspection PyPep8Naming
class reinterpret_cast_func(cast_func):
pass
# noinspection PyPep8Naming
class pointer_arithmetic_func(sp.Function, Boolean):
@property
def canonical(self):
if hasattr(self.args[0], 'canonical'):
return self.args[0].canonical
else:
raise NotImplementedError()
class TypedSymbol(sp.Symbol):
def __new__(cls, *args, **kwds):
obj = TypedSymbol.__xnew_cached_(cls, *args, **kwds)
return obj
def __new_stage2__(cls, name, dtype, **kwargs):
assumptions = assumptions_from_dtype(dtype)
assumptions.update(kwargs)
obj = super(TypedSymbol, cls).__xnew__(cls, name, **assumptions)
try:
obj._dtype = create_type(dtype)
except (TypeError, ValueError):
# on error keep the string
obj._dtype = dtype
return obj
__xnew__ = staticmethod(__new_stage2__)
__xnew_cached_ = staticmethod(cacheit(__new_stage2__))
@property
def dtype(self):
return self._dtype
def _hashable_content(self):
return super()._hashable_content(), hash(self._dtype)
def __getnewargs__(self):
return self.name, self.dtype
def __getnewargs_ex__(self):
return (self.name, self.dtype), self.assumptions0
@property
def canonical(self):
return self
@property
def reversed(self):
return self
@property
def headers(self):
headers = []
try:
if np.issubdtype(self.dtype.numpy_dtype, np.complexfloating):
headers.append('"cuda_complex.hpp"')
except Exception:
pass
try:
if np.issubdtype(self.dtype.base_type.numpy_dtype, np.complexfloating):
headers.append('"cuda_complex.hpp"')
except Exception:
pass
return headers
def create_type(specification):
"""Creates a subclass of Type according to a string or an object of subclass Type.
Args:
specification: Type object, or a string
Returns:
Type object, or a new Type object parsed from the string
"""
if isinstance(specification, Type):
return specification
else:
numpy_dtype = np.dtype(specification)
if numpy_dtype.fields is None:
return BasicType(numpy_dtype, const=False)
else:
return StructType(numpy_dtype, const=False)
@memorycache(maxsize=64)
def create_composite_type_from_string(specification):
"""Creates a new Type object from a c-like string specification.
Args:
specification: Specification string
Returns:
Type object
"""
specification = specification.lower().split()
parts = []
current = []
for s in specification:
if s == '*':
parts.append(current)
current = [s]
else:
current.append(s)
if len(current) > 0:
parts.append(current)
# Parse native part
base_part = parts.pop(0)
const = False
if 'const' in base_part:
const = True
base_part.remove('const')
assert len(base_part) == 1
if base_part[0][-1] == "*":
base_part[0] = base_part[0][:-1]
parts.append('*')
current_type = BasicType(np.dtype(base_part[0]), const)
# Parse pointer parts
for part in parts:
restrict = False
const = False
if 'restrict' in part:
restrict = True
part.remove('restrict')
if 'const' in part:
const = True
part.remove("const")
assert len(part) == 1 and part[0] == '*'
current_type = PointerType(current_type, const, restrict)
return current_type
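# Illustrative example: create_composite_type_from_string("double * restrict const")
# yields a const, restrict-qualified pointer to double,
# printed as "double * RESTRICT const".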
def get_base_type(data_type):
while data_type.base_type is not None:
data_type = data_type.base_type
return data_type
def to_ctypes(data_type):
"""
Transforms a given Type into ctypes
:param data_type: Subclass of Type
:return: ctypes type object
"""
if isinstance(data_type, PointerType):
return ctypes.POINTER(to_ctypes(data_type.base_type))
elif isinstance(data_type, StructType):
return ctypes.POINTER(ctypes.c_uint8)
else:
return to_ctypes.map[data_type.numpy_dtype]
to_ctypes.map = {
np.dtype(np.int8): ctypes.c_int8,
np.dtype(np.int16): ctypes.c_int16,
np.dtype(np.int32): ctypes.c_int32,
np.dtype(np.int64): ctypes.c_int64,
np.dtype(np.uint8): ctypes.c_uint8,
np.dtype(np.uint16): ctypes.c_uint16,
np.dtype(np.uint32): ctypes.c_uint32,
np.dtype(np.uint64): ctypes.c_uint64,
np.dtype(np.float32): ctypes.c_float,
np.dtype(np.float64): ctypes.c_double,
}
def peel_off_type(dtype, type_to_peel_off):
while type(dtype) is type_to_peel_off:
dtype = dtype.base_type
return dtype
def collate_types(types,
forbid_collation_to_complex=False,
forbid_collation_to_float=False,
default_float_type='float64',
default_int_type='int64'):
"""
Takes a sequence of types and returns their "common type" e.g. (float, double, float) -> double
Uses the collation rules from numpy.
"""
if forbid_collation_to_complex:
types = [t for t in types if not np.issubdtype(t.numpy_dtype, np.complexfloating)]
if not types:
return create_type(default_float_type)
if forbid_collation_to_float:
types = [t for t in types if not np.issubdtype(t.numpy_dtype, np.floating)]
if not types:
return create_type(default_int_type)
# Pointer arithmetic case i.e. pointer + integer is allowed
if any(type(t) is PointerType for t in types):
pointer_type = None
for t in types:
if type(t) is PointerType:
if pointer_type is not None:
raise ValueError("Cannot collate the combination of two pointer types")
pointer_type = t
elif type(t) is BasicType:
if not (t.is_int() or t.is_uint()):
raise ValueError("Invalid pointer arithmetic")
else:
raise ValueError("Invalid pointer arithmetic")
return pointer_type
# peel off vector types; if at least one vector type occurred, the result will also be a vector type
vector_type = [t for t in types if type(t) is VectorType]
if not all_equal(t.width for t in vector_type):
raise ValueError("Collation failed because of vector types with different width")
types = [peel_off_type(t, VectorType) for t in types]
# now we should have a list of basic types - struct types are not yet supported
assert all(type(t) is BasicType for t in types)
if any(t.is_float() for t in types):
types = tuple(t for t in types if t.is_float())
# use numpy collation -> create type from numpy type -> and put vector type around if necessary
result_numpy_type = np.result_type(*(t.numpy_dtype for t in types))
result = BasicType(result_numpy_type)
if vector_type:
result = VectorType(result, vector_type[0].width)
return result
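# Illustrative example: collate_types([create_type('float32'), create_type('float64')])
# follows numpy's promotion rules and yields the double-precision type, while
# collate_types([create_type('int32'), create_type('float32')]) yields float32.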
@memorycache_if_hashable(maxsize=2048)
def get_type_of_expression(expr,
default_float_type='double',
default_int_type='int',
symbol_type_dict=None):
from pystencils.astnodes import ResolvedFieldAccess
from pystencils.cpu.vectorization import vec_all, vec_any
if default_float_type == 'float':
default_float_type = 'float32'
if not symbol_type_dict:
symbol_type_dict = defaultdict(lambda: create_type('double'))
get_type = partial(get_type_of_expression,
default_float_type=default_float_type,
default_int_type=default_int_type,
symbol_type_dict=symbol_type_dict)
expr = sp.sympify(expr)
if isinstance(expr, sp.Integer):
return create_type(default_int_type)
elif expr.is_real is False:
return create_type((np.zeros((1,), default_float_type) * 1j).dtype)
elif isinstance(expr, sp.Rational) or isinstance(expr, sp.Float):
return create_type(default_float_type)
elif isinstance(expr, ResolvedFieldAccess):
return expr.field.dtype
elif isinstance(expr, pystencils.field.Field.AbstractAccess):
return expr.field.dtype
elif isinstance(expr, TypedSymbol):
return expr.dtype
elif isinstance(expr, sp.Symbol):
if symbol_type_dict:
return symbol_type_dict[expr.name]
else:
raise ValueError("All symbols inside this expression have to be typed! ", str(expr))
elif isinstance(expr, cast_func):
return expr.args[1]
elif isinstance(expr, (vec_any, vec_all)):
return create_type("bool")
elif hasattr(expr, 'func') and expr.func == sp.Piecewise:
collated_result_type = collate_types(tuple(get_type(a[0]) for a in expr.args))
collated_condition_type = collate_types(tuple(get_type(a[1]) for a in expr.args))
if type(collated_condition_type) is VectorType and type(collated_result_type) is not VectorType:
collated_result_type = VectorType(collated_result_type, width=collated_condition_type.width)
return collated_result_type
elif isinstance(expr, sp.Indexed):
typed_symbol = expr.base.label
return typed_symbol.dtype.base_type
elif isinstance(expr, (Boolean, BooleanFunction)):
# if any arg is of vector type return a vector boolean, else return a normal scalar boolean
result = create_type("bool")
vec_args = [get_type(a) for a in expr.args if isinstance(get_type(a), VectorType)]
if vec_args:
result = VectorType(result, width=vec_args[0].width)
return result
elif isinstance(expr, sp.Pow):
base_type = get_type(expr.args[0])
if expr.exp.is_integer:
return base_type
else:
return collate_types([create_type(default_float_type), base_type])
elif isinstance(expr, (sp.Sum, sp.Product)):
return get_type(expr.args[0])
elif isinstance(expr, sp.Expr):
expr: sp.Expr
if expr.args:
types = tuple(get_type(a) for a in expr.args)
# collate_types checks numpy_dtype in the special cases
if any(not hasattr(t, 'numpy_dtype') for t in types):
forbid_collation_to_complex = False
forbid_collation_to_float = False
else:
forbid_collation_to_complex = expr.is_real is True
forbid_collation_to_float = expr.is_integer is True
return collate_types(
types,
forbid_collation_to_complex=forbid_collation_to_complex,
forbid_collation_to_float=forbid_collation_to_float,
default_float_type=default_float_type,
default_int_type=default_int_type)
else:
if expr.is_integer:
return create_type(default_int_type)
else:
return create_type(default_float_type)
raise NotImplementedError("Could not determine type for", expr, type(expr))
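# Illustrative example: for x = TypedSymbol('x', 'float32'),
# get_type_of_expression(x + 1) resolves to float32, since the integer constant
# is collated away in favour of the floating point operand.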
sympy_version = sp.__version__.split('.')
if int(sympy_version[0]) * 100 + int(sympy_version[1]) >= 109:
# __setstate__ would bypass the constructor, so we remove it
sp.Number.__getstate__ = sp.Basic.__getstate__
del sp.Basic.__getstate__
class FunctorWithStoredKwargs:
def __init__(self, func, **kwargs):
self.func = func
self.kwargs = kwargs
def __call__(self, *args):
return self.func(*args, **self.kwargs)
# __reduce_ex__ would strip kwargs, so we override it
def basic_reduce_ex(self, protocol):
if hasattr(self, '__getnewargs_ex__'):
args, kwargs = self.__getnewargs_ex__()
else:
args, kwargs = self.__getnewargs__(), {}
if hasattr(self, '__getstate__'):
state = self.__getstate__()
else:
state = None
return FunctorWithStoredKwargs(type(self), **kwargs), args, state
sp.Number.__reduce_ex__ = sp.Basic.__reduce_ex__
sp.Basic.__reduce_ex__ = basic_reduce_ex
class Type(sp.Atom):
def __new__(cls, *args, **kwargs):
return sp.Basic.__new__(cls)
def _sympystr(self, *args, **kwargs):
return str(self)
class BasicType(Type):
@staticmethod
def numpy_name_to_c(name):
if name == 'float64':
return 'double'
elif name == 'float32':
return 'float'
elif name == 'complex64':
return 'ComplexFloat'
elif name == 'complex128':
return 'ComplexDouble'
elif name.startswith('int'):
width = int(name[len("int"):])
return f"int{width}_t"
elif name.startswith('uint'):
width = int(name[len("uint"):])
return f"uint{width}_t"
elif name == 'bool':
return 'bool'
else:
raise NotImplementedError(f"Can map numpy to C name for {name}")
def __init__(self, dtype, const=False):
self.const = const
if isinstance(dtype, Type):
self._dtype = dtype.numpy_dtype
else:
self._dtype = np.dtype(dtype)
assert self._dtype.fields is None, "Tried to initialize NativeType with a structured type"
assert self._dtype.hasobject is False
assert self._dtype.subdtype is None
def __getnewargs__(self):
return self.numpy_dtype, self.const
def __getnewargs_ex__(self):
return (self.numpy_dtype, self.const), {}
@property
def base_type(self):
return None
@property
def numpy_dtype(self):
return self._dtype
@property
def sympy_dtype(self):
return getattr(sympy.codegen.ast, str(self.numpy_dtype))
@property
def item_size(self):
return 1
def is_int(self):
return self.numpy_dtype in np.sctypes['int'] or self.numpy_dtype in np.sctypes['uint']
def is_float(self):
return self.numpy_dtype in np.sctypes['float']
def is_uint(self):
return self.numpy_dtype in np.sctypes['uint']
def is_complex(self):
return self.numpy_dtype in np.sctypes['complex']
def is_other(self):
return self.numpy_dtype in np.sctypes['others']
@property
def base_name(self):
return BasicType.numpy_name_to_c(str(self._dtype))
def __str__(self):
result = BasicType.numpy_name_to_c(str(self._dtype))
if self.const:
result += " const"
return result
def __repr__(self):
return str(self)
def __eq__(self, other):
if not isinstance(other, BasicType):
return False
else:
return (self.numpy_dtype, self.const) == (other.numpy_dtype, other.const)
def __hash__(self):
return hash(str(self))
class VectorType(Type):
instruction_set = None
def __init__(self, base_type, width=4):
self._base_type = base_type
self.width = width
@property
def base_type(self):
return self._base_type
@property
def item_size(self):
return self.width * self.base_type.item_size
def __eq__(self, other):
if not isinstance(other, VectorType):
return False
else:
return (self.base_type, self.width) == (other.base_type, other.width)
def __str__(self):
if self.instruction_set is None:
return f"{self.base_type}[{self.width}]"
else:
if self.base_type == create_type("int64") or self.base_type == create_type("int32"):
return self.instruction_set['int']
elif self.base_type == create_type("float64"):
return self.instruction_set['double']
elif self.base_type == create_type("float32"):
return self.instruction_set['float']
elif self.base_type == create_type("bool"):
return self.instruction_set['bool']
else:
raise NotImplementedError()
def __hash__(self):
return hash((self.base_type, self.width))
def __getnewargs__(self):
return self._base_type, self.width
def __getnewargs_ex__(self):
return (self._base_type, self.width), {}
class PointerType(Type):
def __init__(self, base_type, const=False, restrict=True):
self._base_type = base_type
self.const = const
self.restrict = restrict
def __getnewargs__(self):
return self.base_type, self.const, self.restrict
def __getnewargs_ex__(self):
return (self.base_type, self.const, self.restrict), {}
@property
def alias(self):
return not self.restrict
@property
def base_type(self):
return self._base_type
@property
def item_size(self):
return self.base_type.item_size
def __eq__(self, other):
if not isinstance(other, PointerType):
return False
else:
return (self.base_type, self.const, self.restrict) == (other.base_type, other.const, other.restrict)
def __str__(self):
components = [str(self.base_type), '*']
if self.restrict:
components.append('RESTRICT')
if self.const:
components.append("const")
return " ".join(components)
def __repr__(self):
return str(self)
def __hash__(self):
return hash((self._base_type, self.const, self.restrict))
class StructType:
def __init__(self, numpy_type, const=False):
self.const = const
self._dtype = np.dtype(numpy_type)
def __getnewargs__(self):
return self.numpy_dtype, self.const
def __getnewargs_ex__(self):
return (self.numpy_dtype, self.const), {}
@property
def base_type(self):
return None
@property
def numpy_dtype(self):
return self._dtype
@property
def item_size(self):
return self.numpy_dtype.itemsize
def get_element_offset(self, element_name):
return self.numpy_dtype.fields[element_name][1]
def get_element_type(self, element_name):
np_element_type = self.numpy_dtype.fields[element_name][0]
return BasicType(np_element_type, self.const)
def has_element(self, element_name):
return element_name in self.numpy_dtype.fields
def __eq__(self, other):
if not isinstance(other, StructType):
return False
else:
return (self.numpy_dtype, self.const) == (other.numpy_dtype, other.const)
def __str__(self):
# structs are handled byte-wise
result = "uint8_t"
if self.const:
result += " const"
return result
def __repr__(self):
return str(self)
def __hash__(self):
return hash((self.numpy_dtype, self.const))
class TypedImaginaryUnit(TypedSymbol):
def __new__(cls, *args, **kwds):
obj = TypedImaginaryUnit.__xnew_cached_(cls, *args, **kwds)
return obj
def __new_stage2__(cls, dtype):
obj = super(TypedImaginaryUnit, cls).__xnew__(cls,
"_i",
dtype,
imaginary=True)
return obj
headers = ['"cuda_complex.hpp"']
__xnew__ = staticmethod(__new_stage2__)
__xnew_cached_ = staticmethod(cacheit(__new_stage2__))
def __getnewargs__(self):
return (self.dtype,)
def __getnewargs_ex__(self):
return (self.dtype,), {}