pycodegen / lbmpy / Commits

Commit f3d5a430 authored Apr 10, 2018 by Martin Bauer

Rest of PEP8 renaming

parent 1efaf4fe
Changes 33
boundaries/boundaryconditions.py

@@ -20,7 +20,7 @@ class Boundary:
         :param pdf_field: pystencils field describing the pdf. The current cell is cell next to the boundary,
                           which is influenced by the boundary cell i.e. has a link from the boundary cell to
                           itself.
-        :param direction_symbol: a sympy symbol that can be used as index to the pdfField. It describes
+        :param direction_symbol: a sympy symbol that can be used as index to the pdf_field. It describes
                                  the direction pointing from the fluid to the boundary cell
         :param lb_method: an instance of the LB method used. Use this to adapt the boundary to the method
                           (e.g. compressibility)
@@ -32,8 +32,8 @@ class Boundary:
     @property
     def additional_data(self):
         """Return a list of (name, type) tuples for additional data items required in this boundary
-        These data items can either be initialized in separate kernel see additionalDataKernelInit or by
-        Python callbacks - see additionalDataCallback """
+        These data items can either be initialized in separate kernel see additional_data_kernel_init or by
+        Python callbacks - see additional_data_callback """
         return []

     @property
@@ -134,7 +134,7 @@ class UBB(Boundary):
                            for d_i, v_i in zip(neighbor, velocity)]) * weight_of_direction(direction)

         # Better alternative: in conserved value computation
-        # rename what is currently called density to "virtualDensity"
+        # rename what is currently called density to "virtual_density"
         # provide a new quantity density, which is constant in case of incompressible models
         if not lb_method.conserved_quantity_computation.zero_centered_pdfs:
             cqc = lb_method.conserved_quantity_computation
chapman_enskog/chapman_enskog.py

@@ -261,7 +261,7 @@ class LbMethodEqMoments:
         moment_tuple = ce_moment.moment_tuple

         moment_symbols = []
-        for moment, (eqValue, rr) in self._method.relaxation_info_dict.items():
+        for moment, (eq_value, rr) in self._method.relaxation_info_dict.items():
             if isinstance(moment, tuple):
                 moment_symbols.append(-rr**exponent * CeMoment(pre_collision_moment_name,
                                                                moment, ce_moment.superscript))
@@ -326,8 +326,8 @@ def substitute_collision_operator_moments(expr, lb_moment_computation, collision
                                           pre_collision_moment_name="\\Pi"):
     moments_to_replace = [m for m in expr.atoms(CeMoment) if m.name == collision_op_moment_name]
     subs_dict = {}
-    for ceMoment in moments_to_replace:
-        subs_dict[ceMoment] = lb_moment_computation.get_post_collision_moment(ceMoment, 1, pre_collision_moment_name)
+    for ce_moment in moments_to_replace:
+        subs_dict[ce_moment] = lb_moment_computation.get_post_collision_moment(ce_moment, 1, pre_collision_moment_name)
     return expr.subs(subs_dict)
@@ -341,10 +341,10 @@ def take_moments(eqn, pdf_to_moment_name=(('f', '\Pi'), ('\Omega f', '\\Upsilon'
     velocity_terms = tuple(expanded_symbol(velocity_name, subscript=i) for i in range(3))

     def determine_f_index(factor):
-        FIndex = namedtuple("FIndex", ['momentName', 'superscript'])
-        for symbolListId, pdfSymbolsElement in enumerate(pdf_symbols):
+        FIndex = namedtuple("FIndex", ['moment_name', 'superscript'])
+        for symbol_list_id, pdf_symbols_element in enumerate(pdf_symbols):
             try:
-                return FIndex(pdf_to_moment_name[symbolListId][1], pdfSymbolsElement.index(factor))
+                return FIndex(pdf_to_moment_name[symbol_list_id][1], pdf_symbols_element.index(factor))
             except ValueError:
                 pass
         return None
@@ -370,13 +370,13 @@ def take_moments(eqn, pdf_to_moment_name=(('f', '\Pi'), ('\Omega f', '\\Upsilon'
                 f_index = new_f_index

         moment_tuple = [0] * len(velocity_terms)
-        for cIdx in c_indices:
-            moment_tuple[cIdx] += 1
+        for c_idx in c_indices:
+            moment_tuple[c_idx] += 1
         moment_tuple = tuple(moment_tuple)

         if use_one_neighborhood_aliasing:
             moment_tuple = non_aliased_moment(moment_tuple)
-        result = CeMoment(f_index.momentName, moment_tuple, f_index.superscript)
+        result = CeMoment(f_index.moment_name, moment_tuple, f_index.superscript)
         if derivative_term is not None:
             result = derivative_term.change_arg_recursive(result)
         result *= rest
@@ -510,7 +510,7 @@ def chapman_enskog_ansatz(equation, time_derivative_orders=(1, 3), spatial_deriv
         equation: equation to expand
         time_derivative_orders: tuple describing range for time derivative to expand
         spatial_derivative_orders: tuple describing range for spatial derivatives to expand
-        pdfs: symbols to expand: sequence of triples (symbolName, startOrder, endOrder)
+        pdfs: symbols to expand: sequence of triples (symbol_name, start_order, end_order)
         commutative: can be set to False to have non-commutative pdf symbols
     Returns:
         tuple mapping epsilon order to equation
@@ -533,15 +533,15 @@ def chapman_enskog_ansatz(equation, time_derivative_orders=(1, 3), spatial_deriv
     expanded_pdf_symbols = []

     max_expansion_order = spatial_derivative_orders[1] if spatial_derivative_orders else 10
-    for pdf_name, startOrder, stopOrder in pdfs:
+    for pdf_name, start_order, stop_order in pdfs:
         if isinstance(pdf_name, sp.Symbol):
             pdf_name = pdf_name.name
         expanded_pdf_symbols += [expanded_symbol(pdf_name, superscript=i, commutative=commutative)
-                                 for i in range(startOrder, stopOrder)]
+                                 for i in range(start_order, stop_order)]
         pdf_symbol = sp.Symbol(pdf_name, commutative=commutative)
         subs_dict[pdf_symbol] = sum(eps**i * expanded_symbol(pdf_name, superscript=i, commutative=commutative)
-                                    for i in range(startOrder, stopOrder))
-        max_expansion_order = max(max_expansion_order, stopOrder)
+                                    for i in range(start_order, stop_order))
+        max_expansion_order = max(max_expansion_order, stop_order)
     equation = equation.subs(subs_dict)
     equation = expand_using_linearity(equation, functions=expanded_pdf_symbols).expand().collect(eps)
     result = {eps_order: equation.coeff(eps**eps_order) for eps_order in range(1, 2 * max_expansion_order)}
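For orientation (not part of the diff): the substitution built in the loop above is the standard Chapman-Enskog ansatz. In LaTeX, using the names from the code,

    f \;=\; \sum_{i=\mathrm{start\_order}}^{\mathrm{stop\_order}-1} \varepsilon^{i}\, f^{(i)},

after which the equation is expanded, collected in powers of \varepsilon, and the returned dictionary maps each order k to the coefficient of \varepsilon^{k}.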
@@ -568,7 +568,7 @@ def match_to_navier_stokes(conservation_equations, rho=sp.Symbol("rho"), u=sp.sy
     def match_moment_eqs(moment_eqs, is_compressible):
         shear_and_pressure_eqs = []
-        for i, momEq in enumerate(moment_eqs):
+        for i, mom_eq in enumerate(moment_eqs):
             factor = rho if is_compressible else 1
             ref = diff_simplify(Diff(factor * u[i], t) + sum(Diff(factor * u[i] * u[j], j) for j in range(dim)))
             shear_and_pressure_eqs.append(diff_simplify(moment_eqs[i]) - ref)
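Written out, the reference expression assembled above is the inertial part of the momentum balance (with factor = \rho for compressible models and 1 otherwise):

    \mathrm{ref}_i \;=\; \partial_t(\mathrm{factor}\, u_i) \;+\; \sum_{j=0}^{\mathrm{dim}-1} \partial_j(\mathrm{factor}\, u_i u_j),

so shear_and_pressure_eqs collects what remains of each moment equation after this part is subtracted.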
@@ -590,8 +590,8 @@ def match_to_navier_stokes(conservation_equations, rho=sp.Symbol("rho"), u=sp.sy
         sigma_ = sp.zeros(dim)
         error_terms_ = []
-        for i, shearAndPressureEq in enumerate(shear_and_pressure_eqs):
-            eq_without_pressure = shearAndPressureEq - sum(coeff * Diff(arg, i) for coeff, arg in pressure_terms)
+        for i, shear_and_pressure_eq in enumerate(shear_and_pressure_eqs):
+            eq_without_pressure = shear_and_pressure_eq - sum(coeff * Diff(arg, i) for coeff, arg in pressure_terms)
             for d in eq_without_pressure.atoms(Diff):
                 eq_without_pressure = eq_without_pressure.collect(d)
                 sigma_[i, d.target] += eq_without_pressure.coeff(d) * d.arg
chapman_enskog/chapman_enskog_higher_order.py

@@ -69,17 +69,17 @@ def determine_higher_order_moments(epsilon_hierarchy, relaxation_rates, moment_c
     time_diffs = OrderedDict()
     non_eq_moms = OrderedDict()
-    for epsOrder in range(1, order):
-        eps_eq = epsilon_hierarchy[epsOrder]
+    for eps_order in range(1, order):
+        eps_eq = epsilon_hierarchy[eps_order]
         for order in range(order + 1):
             eqs = sp.Matrix([full_expand(tm(eps_eq * m)) for m in poly_moments(order, dim)])
             unknown_moments = [m for m in eqs.atoms(CeMoment)
-                               if m.superscript == epsOrder and sum(m.moment_tuple) == order]
+                               if m.superscript == eps_order and sum(m.moment_tuple) == order]

             if len(unknown_moments) == 0:
                 for eq in eqs:
                     time_diffs_in_expr = [d for d in eq.atoms(Diff)
-                                          if (d.target == 't' or d.target == sp.Symbol("t")) and d.superscript == epsOrder]
+                                          if (d.target == 't' or d.target == sp.Symbol("t")) and d.superscript == eps_order]
                     if len(time_diffs_in_expr) == 0:
                         continue
                     assert len(time_diffs_in_expr) == 1, \
chapman_enskog/chapman_enskog_steady_state.py

@@ -13,12 +13,12 @@ class SteadyStateChapmanEnskogAnalysis:
         self.method = method
         self.dim = method.dim
         self.order = order
-        self.physicalVariables = list(sp.Matrix(self.method.moment_equilibrium_values).atoms(sp.Symbol))  # rho, u..
+        self.physical_variables = list(sp.Matrix(self.method.moment_equilibrium_values).atoms(sp.Symbol))  # rho, u..
         self.eps = sp.Symbol("epsilon")

         self.f_sym = sp.Symbol("f", commutative=False)
         self.f_syms = [expanded_symbol("f", superscript=i, commutative=False) for i in range(order + 1)]
-        self.collisionOpSym = sp.Symbol("A", commutative=False)
+        self.collision_op_sym = sp.Symbol("A", commutative=False)
         self.force_sym = sp.Symbol("F_q", commutative=False)
         self.velocity_syms = sp.Matrix([expanded_symbol("c", subscript=i, commutative=False) for i in range(self.dim)])
@@ -26,34 +26,34 @@ class SteadyStateChapmanEnskogAnalysis:
         self.force_model = None
         if force_model_class:
             acceleration_symbols = sp.symbols("a_:%d" % (self.dim,), commutative=False)
-            self.physicalVariables += acceleration_symbols
+            self.physical_variables += acceleration_symbols
             self.force_model = force_model_class(acceleration_symbols)
             self.F_q = self.force_model(self.method)

         # Perform the analysis
-        self.tayloredEquation = self._create_taylor_expanded_equation()
-        inserted_hierarchy, raw_hierarchy = self._create_pdf_hierarchy(self.tayloredEquation)
-        self.pdfHierarchy = inserted_hierarchy
-        self.pdfHierarchyRaw = raw_hierarchy
-        self.recombinedEq = self._recombine_pdfs(self.pdfHierarchy)
+        self.taylored_equation = self._create_taylor_expanded_equation()
+        inserted_hierarchy, raw_hierarchy = self._create_pdf_hierarchy(self.taylored_equation)
+        self.pdf_hierarchy = inserted_hierarchy
+        self.pdf_hierarchy_raw = raw_hierarchy
+        self.recombined_eq = self._recombine_pdfs(self.pdf_hierarchy)

         symbols_to_values = self._get_symbols_to_values_dict()
-        self.continuityEquation = self._compute_continuity_equation(self.recombinedEq, symbols_to_values)
-        self.momentumEquations = [self._compute_momentum_equation(self.recombinedEq, symbols_to_values, h)
-                                  for h in range(self.dim)]
+        self.continuity_equation = self._compute_continuity_equation(self.recombined_eq, symbols_to_values)
+        self.momentum_equations = [self._compute_momentum_equation(self.recombined_eq, symbols_to_values, h)
+                                   for h in range(self.dim)]

     def get_pdf_hierarchy(self, order, collision_operator_symbol=sp.Symbol("omega")):
         def substitute_non_commuting_symbols(eq):
             return eq.subs({a: sp.Symbol(a.name) for a in eq.atoms(sp.Symbol)})
-        result = self.pdfHierarchy[order].subs(self.collisionOpSym, collision_operator_symbol)
+        result = self.pdf_hierarchy[order].subs(self.collision_op_sym, collision_operator_symbol)
         result = normalize_diff_order(result, functions=(self.f_syms[0], self.force_sym))
         return substitute_non_commuting_symbols(result)

     def get_continuity_equation(self, only_order=None):
-        return self._extract_order(self.continuityEquation, only_order)
+        return self._extract_order(self.continuity_equation, only_order)

     def get_momentum_equation(self, only_order=None):
-        return [self._extract_order(e, only_order) for e in self.momentumEquations]
+        return [self._extract_order(e, only_order) for e in self.momentum_equations]

     def _extract_order(self, eq, order):
         if order is None:
@@ -76,7 +76,7 @@ class SteadyStateChapmanEnskogAnalysis:
         taylor_expansion = DiffOperator.apply(differential_operator.expand(), self.f_sym)
         f_non_eq = self.f_sym - self.f_syms[0]
-        return taylor_expansion + self.collisionOpSym * f_non_eq - self.eps * self.force_sym
+        return taylor_expansion + self.collision_op_sym * f_non_eq - self.eps * self.force_sym

     def _create_pdf_hierarchy(self, taylored_equation):
         """
@@ -90,8 +90,8 @@ class SteadyStateChapmanEnskogAnalysis:
         inserted_hierarchy = []
         raw_hierarchy = []
         substitution_dict = {}
-        for ceEq, f_i in zip(chapman_enskog_hierarchy, self.f_syms):
-            new_eq = -1 / self.collisionOpSym * (ceEq - self.collisionOpSym * f_i)
+        for ce_eq, f_i in zip(chapman_enskog_hierarchy, self.f_syms):
+            new_eq = -1 / self.collision_op_sym * (ce_eq - self.collision_op_sym * f_i)
             raw_hierarchy.append(new_eq)
             new_eq = expand_using_linearity(new_eq.subs(substitution_dict), functions=self.f_syms + [self.force_sym])
             if new_eq:
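In formula form, the hierarchy construction above rearranges each Chapman-Enskog equation, which contains the term A f^(i), to isolate the corresponding pdf contribution (a sketch using the code's collision operator symbol A):

    f^{(i)} \;=\; -A^{-1}\,\bigl(\mathrm{ce\_eq}_i - A\, f^{(i)}\bigr).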
@@ -110,14 +110,14 @@ class SteadyStateChapmanEnskogAnalysis:
         eq = sp.expand(self.velocity_syms[coordinate] * recombined_eq)
         result = self._compute_moments(eq, symbols_to_values)
-        if self.force_model and hasattr(self.force_model, 'equilibriumVelocityShift'):
+        if self.force_model and hasattr(self.force_model, 'equilibrium_velocity_shift'):
             compressible = self.method.conserved_quantity_computation.compressible
-            shift = self.force_model.equilibriumVelocityShift(sp.Symbol("rho") if compressible else 1)
+            shift = self.force_model.equilibrium_velocity_shift(sp.Symbol("rho") if compressible else 1)
             result += shift[coordinate]
         return result

     def _get_symbols_to_values_dict(self):
-        result = {1 / self.collisionOpSym: self.method.inverse_collision_matrix,
+        result = {1 / self.collision_op_sym: self.method.inverse_collision_matrix,
                   self.force_sym: sp.Matrix(self.force_model(self.method)) if self.force_model else 0,
                   self.f_syms[0]: self.method.get_equilibrium_terms()}
         for i, c_i in enumerate(self.velocity_syms):
@@ -163,7 +163,7 @@ class SteadyStateChapmanEnskogAnalysis:
             new_products.append(new_prod)

-        return normalize_diff_order(expand_using_linearity(sp.Add(*new_products), functions=self.physicalVariables))
+        return normalize_diff_order(expand_using_linearity(sp.Add(*new_products), functions=self.physical_variables))


# ----------------------------------------------------------------------------------------------------------------------
continuous_distribution_measures.py

@@ -20,7 +20,7 @@ def moment_generating_function(generating_function, symbols, symbols_in_result):
     :param generating_function: sympy expression
     :param symbols: a sequence of symbols forming the vector x
     :param symbols_in_result: a sequence forming the vector t
-    :return: transformation result F: an expression that depends now on symbolsInResult
+    :return: transformation result F: an expression that depends now on symbols_in_result
              (symbols have been integrated out)

     .. note::
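As a reminder (standard definition, not part of the diff), the moment generating function referred to here has the form

    F(\mathbf{t}) \;=\; \int f(\mathbf{x})\, e^{\langle \mathbf{x},\, \mathbf{t}\rangle}\, d\mathbf{x},

so the variables x are integrated out and the result depends only on the symbols in t; the sign convention in the exponent is whatever the implementation actually uses.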
creationfunctions.py

@@ -90,11 +90,11 @@ Simplifications / Transformations:
Field size information:

-- ``pdfArr=None``: pass a numpy array here to create kernels with fixed size and create the loop nest according
-  to layout of this array
+- ``pdf_arr=None``: pass a numpy array here to create kernels with fixed size and create the loop nest according
+  to layout of this array
- ``field_size=None``: create kernel for fixed field size
-- ``field_layout='c'``: ``'c'`` or ``'numpy'`` for standard numpy layout, ``'reverseNumpy'`` or ``'f'`` for fortran
-  layout, this does not apply when pdfArr was given, then the same layout as pdfArr is used
+- ``field_layout='c'``: ``'c'`` or ``'numpy'`` for standard numpy layout, ``'reverse_numpy'`` or ``'f'`` for fortran
+  layout, this does not apply when pdf_arr was given, then the same layout as pdf_arr is used

GPU:
@@ -102,7 +102,7 @@ GPU:
  and installed *pycuda* package
- ``gpu_indexing='block'``: determines mapping of CUDA threads to cells. Can be either ``'block'`` or ``'line'``
- ``gpu_indexing_params='block'``: parameters passed to init function of gpu indexing.
-  For ``'block'`` indexing one can e.g. specify the block size ``{'blockSize' : (128, 4, 1)}``
+  For ``'block'`` indexing one can e.g. specify the block size ``{'block_size' : (128, 4, 1)}``

Other:
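A hedged illustration of how these options combine into an optimization dict; the keys are taken from the docstring above and from update_with_default_parameters further down, but the full set of accepted keys is defined there, not here:

    # example optimization dict, built only from options named in this file
    optimization = {
        'target': 'gpu',                                # 'cpu' or 'gpu'
        'field_layout': 'fzyx',                         # or 'numpy' (='c'), 'reverse_numpy' (='f'), 'zyxf'
        'gpu_indexing': 'block',                        # 'block' or 'line'
        'gpu_indexing_params': {'block_size': (128, 4, 1)},
    }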
@@ -173,10 +173,10 @@ from lbmpy.updatekernels import StreamPullTwoFieldsAccessor, PeriodicTwoFieldsAc
def create_lb_function(ast=None, optimization={}, **kwargs):
-    params, optParams = update_with_default_parameters(kwargs, optimization)
+    params, opt_params = update_with_default_parameters(kwargs, optimization)

     if ast is None:
-        params['optimization'] = optParams
+        params['optimization'] = opt_params
         ast = create_lb_ast(**params)
     res = ast.compile()
@@ -279,7 +279,7 @@ def create_lb_collision_rule(lb_method=None, optimization={}, **kwargs):
     if params['velocity_input'] is not None:
         eqs = [Assignment(cqc.zeroth_order_moment_symbol, sum(lb_method.pre_collision_pdf_symbols))]
         velocity_field = params['velocity_input']
-        eqs += [Assignment(uSym, velocity_field(i)) for i, uSym in enumerate(cqc.first_order_moment_symbols)]
+        eqs += [Assignment(u_sym, velocity_field(i)) for i, u_sym in enumerate(cqc.first_order_moment_symbols)]
         eqs = AssignmentCollection(eqs, [])
         collision_rule = lb_method.get_collision_rule(conserved_quantity_equations=eqs)
     else:
@@ -366,7 +366,7 @@ def create_lb_method_from_existing(method, modification_function):
     Args:
         method: old method
-        modification_function: function receiving (moment, equilibriumValue, relaxation_rate) as arguments,
+        modification_function: function receiving (moment, equilibrium_value, relaxation_rate) as arguments,
                                i.e. one row of the relaxation table, returning a modified version
     """
     relaxation_table = (modification_function(m, eq, rr)
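A minimal sketch of a modification_function that satisfies the contract stated above (one relaxation-table row in, one row out); the shared symbol "omega" is made up for illustration:

    import sympy as sp

    def relax_everything_with_omega(moment, equilibrium_value, relaxation_rate):
        # keep moment and equilibrium value, replace every relaxation rate
        # by one common symbol
        return moment, equilibrium_value, sp.Symbol("omega")

    # usage sketch: new_method = create_lb_method_from_existing(method, relax_everything_with_omega)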
@@ -463,7 +463,7 @@ def update_with_default_parameters(params, opt_params=None, fail_on_unknown_para
     'split': False,

     'field_size': None,
-    'field_layout': 'fzyx',  # can be 'numpy' (='c'), 'reverseNumpy' (='f'), 'fzyx', 'zyxf'
+    'field_layout': 'fzyx',  # can be 'numpy' (='c'), 'reverse_numpy' (='f'), 'fzyx', 'zyxf'
     'symbolic_field': None,

     'target': 'cpu',
cumulants.py

@@ -52,7 +52,7 @@ def __cumulant_raw_moment_transform(index, dependent_var_dict, outer_function, d
     index: tuple describing the index of the cumulant/moment to express as function of moments/cumulants
     dependent_var_dict: a dictionary from index tuple to moments/cumulants symbols, or None to use default symbols
     outer_function: logarithm to transform from moments->cumulants, exp for inverse direction
-    default_prefix: if dependentVarDict is None, this is used to construct symbols of the form prefix_i_j_k
+    default_prefix: if dependent_var_dict is None, this is used to construct symbols of the form prefix_i_j_k
     centralized: if True the first order moments/cumulants are set to zero
     """
     dim = len(index)
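For context (standard relation, not part of the diff): raw moments and cumulants come from the same generating function up to the outer log/exp, which is exactly the role of outer_function above:

    K(\mathbf{t}) \;=\; \log M(\mathbf{t}), \qquad
    \kappa_{ijk} \;=\; \partial_{t_x}^{\,i}\, \partial_{t_y}^{\,j}\, \partial_{t_z}^{\,k}\, K(\mathbf{t})\Big|_{\mathbf{t}=0},

with the raw moments obtained in the same way from M(t), and exp inverting the relation.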
@@ -74,8 +74,8 @@ def __cumulant_raw_moment_transform(index, dependent_var_dict, outer_function, d
     # index (2,1,0) means differentiate twice w.r.t to first variable, and once w.r.t to second variable
     # this is transformed here into representation [0,0,1] such that each entry is one diff operation
     partition_list = []
-    for i, indexComponent in enumerate(index):
-        for j in range(indexComponent):
+    for i, index_component in enumerate(index):
+        for j in range(index_component):
             partition_list.append(i)

     if len(partition_list) == 0:  # special case for zero index
fieldaccess.py

@@ -68,22 +68,22 @@ class PeriodicTwoFieldsAccessor(PdfFieldAccessor):
         for i, d in enumerate(stencil):
             pull_direction = inverse_direction(d)
             periodic_pull_direction = []
-            for coordId, dirElement in enumerate(pull_direction):
-                if not self._periodicity[coordId]:
-                    periodic_pull_direction.append(dirElement)
+            for coord_id, dir_element in enumerate(pull_direction):
+                if not self._periodicity[coord_id]:
+                    periodic_pull_direction.append(dir_element)
                     continue

                 lower_limit = self._ghostLayers
-                upper_limit = field.spatial_shape[coordId] - 1 - self._ghostLayers
+                upper_limit = field.spatial_shape[coord_id] - 1 - self._ghostLayers
                 limit_diff = upper_limit - lower_limit
-                loop_counter = LoopOverCoordinate.get_loop_counter_symbol(coordId)
-                if dirElement == 0:
+                loop_counter = LoopOverCoordinate.get_loop_counter_symbol(coord_id)
+                if dir_element == 0:
                     periodic_pull_direction.append(0)
-                elif dirElement == 1:
-                    new_dir_element = sp.Piecewise((dirElement, loop_counter < upper_limit), (-limit_diff, True))
+                elif dir_element == 1:
+                    new_dir_element = sp.Piecewise((dir_element, loop_counter < upper_limit), (-limit_diff, True))
                     periodic_pull_direction.append(new_dir_element)
-                elif dirElement == -1:
-                    new_dir_element = sp.Piecewise((dirElement, loop_counter > lower_limit), (limit_diff, True))
+                elif dir_element == -1:
+                    new_dir_element = sp.Piecewise((dir_element, loop_counter > lower_limit), (limit_diff, True))
                     periodic_pull_direction.append(new_dir_element)
                 else:
                     raise NotImplementedError("This accessor supports only nearest neighbor stencils")
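A small self-contained illustration of the wrap-around expression used above; the limits are made-up numbers, whereas in the accessor they come from the field shape and the ghost layers:

    import sympy as sp

    ctr = sp.Symbol("ctr")             # loop counter along one periodic coordinate
    lower_limit, upper_limit = 1, 62   # hypothetical ghost-layer bounds
    limit_diff = upper_limit - lower_limit

    # a pull offset of +1 stays +1 inside the domain and jumps back by
    # limit_diff at the upper border, i.e. periodic wrap-around
    wrapped_offset = sp.Piecewise((1, ctr < upper_limit), (-limit_diff, True))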
@@ -127,9 +127,9 @@ def visualize_field_mapping(axes, stencil, field_mapping, color='b'):
     from lbmpy.plot2d import LbGrid
     grid = LbGrid(3, 3)
     grid.fill_with_default_arrows()
-    for fieldAccess, direction in zip(field_mapping, stencil):
-        field_position = stencil[fieldAccess.index[0]]
-        neighbor = fieldAccess.offsets
+    for field_access, direction in zip(field_mapping, stencil):
+        field_position = stencil[field_access.index[0]]
+        neighbor = field_access.offsets
         grid.add_arrow((1 + neighbor[0], 1 + neighbor[1]),
                        arrow_position=field_position, arrow_direction=direction, color=color)
     grid.draw(axes)
forcemodels.py

@@ -110,14 +110,14 @@ class Simple(object):
     def __call__(self, lb_method, **kwargs):
         assert len(self._force) == lb_method.dim

-        def scalarProduct(a, b):
+        def scalar_product(a, b):
             return sum(a_i * b_i for a_i, b_i in zip(a, b))

-        return [3 * w_i * scalarProduct(self._force, direction)
+        return [3 * w_i * scalar_product(self._force, direction)
                 for direction, w_i in zip(lb_method.stencil, lb_method.weights)]

     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)


class Luo(object):
@@ -139,7 +139,7 @@ class Luo(object):
         return result

     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)


class Guo(object):
@@ -153,15 +153,15 @@ class Guo(object):
     def __call__(self, lb_method):
         luo = Luo(self._force)

-        shearRelaxationRate = get_shear_relaxation_rate(lb_method)
-        correctionFactor = (1 - sp.Rational(1, 2) * shearRelaxationRate)
-        return [correctionFactor * t for t in luo(lb_method)]
+        shear_relaxation_rate = get_shear_relaxation_rate(lb_method)
+        correction_factor = (1 - sp.Rational(1, 2) * shear_relaxation_rate)
+        return [correction_factor * t for t in luo(lb_method)]

     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)

-    def equilibriumVelocityShift(self, density):
-        return defaultVelocityShift(density, self._force)
+    def equilibrium_velocity_shift(self, density):
+        return default_velocity_shift(density, self._force)


class Buick(object):
@@ -176,15 +176,15 @@ class Buick(object):
     def __call__(self, lb_method, **kwargs):
         simple = Simple(self._force)

-        shearRelaxationRate = get_shear_relaxation_rate(lb_method)
-        correctionFactor = (1 - sp.Rational(1, 2) * shearRelaxationRate)
-        return [correctionFactor * t for t in simple(lb_method)]
+        shear_relaxation_rate = get_shear_relaxation_rate(lb_method)
+        correction_factor = (1 - sp.Rational(1, 2) * shear_relaxation_rate)
+        return [correction_factor * t for t in simple(lb_method)]

     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)

-    def equilibriumVelocityShift(self, density):
-        return defaultVelocityShift(density, self._force)
+    def equilibrium_velocity_shift(self, density):
+        return default_velocity_shift(density, self._force)


class EDM(object):
@@ -198,18 +198,18 @@ class EDM(object):
         rho = cqc.zeroth_order_moment_symbol if cqc.compressible else 1
         u = cqc.first_order_moment_symbols

-        shiftedU = (u_i + f_i / rho for u_i, f_i in zip(u, self._force))
-        eqTerms = lb_method.get_equilibrium_terms()
-        shiftedEq = eqTerms.subs({u_i: su_i for u_i, su_i in zip(u, shiftedU)})
-        return shiftedEq - eqTerms
+        shifted_u = (u_i + f_i / rho for u_i, f_i in zip(u, self._force))
+        eq_terms = lb_method.get_equilibrium_terms()
+        shifted_eq = eq_terms.subs({u_i: su_i for u_i, su_i in zip(u, shifted_u)})
+        return shifted_eq - eq_terms

     def macroscopic_velocity_shift(self, density):
-        return defaultVelocityShift(density, self._force)
+        return default_velocity_shift(density, self._force)


# -------------------------------- Helper functions ------------------------------------------------------------------


-def defaultVelocityShift(density, force):
+def default_velocity_shift(density, force):
     return [f_i / (2 * density) for f_i in force]
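Read directly from the (unchanged) expressions above, the force terms and the shared velocity shift are, in formula form:

    F_q^{\mathrm{Simple}} = 3\, w_q\, (\mathbf{c}_q \cdot \mathbf{F}), \qquad
    F_q^{\mathrm{Guo/Buick}} = \Bigl(1 - \tfrac{\omega_s}{2}\Bigr)\, F_q^{\mathrm{Luo/Simple}}, \qquad
    F_q^{\mathrm{EDM}} = f_q^{eq}(\rho,\, \mathbf{u} + \mathbf{F}/\rho) - f_q^{eq}(\rho,\, \mathbf{u}),

with default_velocity_shift giving \Delta\mathbf{u} = \mathbf{F}/(2\rho).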
geometry.py

@@ -87,8 +87,8 @@ def add_pipe(boundary_handling, diameter, boundary=NoSlip()):
     :param boundary_handling: boundary handling object, works for serial and parallel versions
     :param diameter: pipe diameter, can be either a constant value or a callback function.
-                     the callback function has the signature (xCoordArray, domainShapeInCells) and has to return
-                     a array of same shape as the received xCoordArray, with the diameter for each x position
+                     the callback function has the signature (x_coord_array, domain_shape_in_cells) and has to return
+                     a array of same shape as the received x_coord_array, with the diameter for each x position
     :param boundary: boundary object that is set at the wall, defaults to NoSlip (bounce back)
     """
     domain_shape = boundary_handling.shape
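A minimal sketch of such a diameter callback, assuming only the signature stated in the docstring; the converging-pipe numbers are made up for illustration:

    import numpy as np

    def converging_diameter(x_coord_array, domain_shape_in_cells):
        # diameter shrinks linearly from 50% to 30% of the domain height
        # along x; returns an array shaped like x_coord_array
        height = domain_shape_in_cells[1]
        relative_x = x_coord_array / domain_shape_in_cells[0]
        return (0.5 - 0.2 * relative_x) * height

    # usage sketch: add_pipe(boundary_handling, diameter=converging_diameter)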
lbstep.py

@@ -18,7 +18,7 @@ class LatticeBoltzmannStep:
                  kernel_params=MappingProxyType({}), data_handling=None, name="lbm", optimization=None,
                  velocity_data_name=None, density_data_name=None, density_data_index=None,
                  compute_velocity_in_every_step=False, compute_density_in_every_step=False,
-                 velocity_input_array_name=None, time_step_order='streamCollide', flag_interface=None,
+                 velocity_input_array_name=None, time_step_order='stream_collide', flag_interface=None,
                  **method_parameters):

         # --- Parameter normalization ---
@@ -87,10 +87,10 @@ class LatticeBoltzmannStep:
             optimization['symbolic_field'] = data_handling.fields[self._pdf_arr_name]
         method_parameters['field_name'] = self._pdf_arr_name
         method_parameters['temporary_field_name'] = self._tmp_arr_name
-        if time_step_order == 'streamCollide':
+        if time_step_order == 'stream_collide':
             self._lbmKernels = [create_lb_function(optimization=optimization,
                                                    **method_parameters)]
-        elif time_step_order == 'collideStream':
+        elif time_step_order == 'collide_stream':
             self._lbmKernels = [create_lb_function(optimization=optimization,
                                                    kernel_type='collide_only',
                                                    **method_parameters),
@@ -224,9 +224,9 @@ class LatticeBoltzmannStep:
             self._data_handling.to_cpu(self._pdf_arr_name)
         self._data_handling.run_kernel(self._getterKernel, **self.kernel_params)

-    def run(self, timeSteps):
+    def run(self, time_steps):
         self.pre_run()
-        for i in range(timeSteps):
+        for i in range(time_steps):
             self.time_step()
         self.post_run()
macroscopic_value_kernels.py

@@ -3,57 +3,59 @@ from pystencils.field import Field, get_layout_of_array
from lbmpy.simplificationfactory import create_simplification_strategy


-def compileMacroscopicValuesGetter(lb_method, outputQuantities, pdfArr=None, field_layout='numpy', target='cpu'):
+def compile_macroscopic_values_getter(lb_method, output_quantities, pdf_arr=None, field_layout='numpy', target='cpu'):
     """
     Create kernel to compute macroscopic value(s) from a pdf field (e.g. density or velocity)

     :param lb_method: instance of :class:`lbmpy.methods.AbstractLbMethod`
-    :param outputQuantities: sequence of quantities to compute e.g. ['density', 'velocity']
-    :param pdfArr: optional numpy array for pdf field - used to get optimal loop structure for kernel
-    :param field_layout: layout for output field, also used for pdf field if pdfArr is not given
+    :param output_quantities: sequence of quantities to compute e.g. ['density', 'velocity']
+    :param pdf_arr: optional numpy array for pdf field - used to get optimal loop structure for kernel
+    :param field_layout: layout for output field, also used for pdf field if pdf_arr is not given
     :param target: 'cpu' or 'gpu'
     :return: a function to compute macroscopic values:
         - pdf_array
-        - keyword arguments from name of conserved quantity (as in outputQuantities) to numpy field
+        - keyword arguments from name of conserved quantity (as in output_quantities) to numpy field
     """
-    if not (isinstance(outputQuantities, list) or isinstance(outputQuantities, tuple)):
-        outputQuantities = [outputQuantities]
+    if not (isinstance(output_quantities, list) or isinstance(output_quantities, tuple)):
+        output_quantities = [output_quantities]

     cqc = lb_method.conserved_quantity_computation
-    unknownQuantities = [oq for oq in outputQuantities if oq not in cqc.conserved_quantities]
-    if unknownQuantities:
+    unknown_quantities = [oq for oq in output_quantities if oq not in cqc.conserved_quantities]
+    if unknown_quantities:
         raise ValueError("No such conserved quantity: %s, conserved quantities are %s" %
-                         (str(unknownQuantities), str(cqc.conserved_quantities.keys())))
+                         (str(unknown_quantities), str(cqc.conserved_quantities.keys())))

-    if pdfArr is None:
-        pdfField = Field.create_generic('pdfs', lb_method.dim, index_dimensions=1, layout=field_layout)
+    if pdf_arr is None:
+        pdf_field = Field.create_generic('pdfs', lb_method.dim, index_dimensions=1, layout=field_layout)
     else:
-        pdfField = Field.create_from_numpy_array('pdfs', pdfArr, index_dimensions=1)
+        pdf_field = Field.create_from_numpy_array('pdfs', pdf_arr, index_dimensions=1)

-    outputMapping = {}
-    for outputQuantity in outputQuantities:
-        numberOfElements = cqc.cons
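Going only by the docstring above, a hedged usage sketch of the renamed getter; the output array shapes and the exact call convention are assumptions, not taken from this commit:

    import numpy as np

    def compute_density_and_velocity(lb_method, pdf_arr):
        # 'density' and 'velocity' are the example quantities named in the docstring
        getter = compile_macroscopic_values_getter(lb_method, ['density', 'velocity'], pdf_arr=pdf_arr)

        # assumed shapes: one scalar per cell for density, one dim-vector per cell for velocity
        density = np.zeros(pdf_arr.shape[:-1])
        velocity = np.zeros(pdf_arr.shape[:-1] + (lb_method.dim,))

        # the returned function takes the pdf array plus one keyword argument
        # per requested quantity, mapping its name to the output numpy field
        getter(pdf_arr, density=density, velocity=velocity)
        return density, velocity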