benchmark.py
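"""Benchmark scenarios for the waLBerla phase-field / hydrodynamic GPU application.

Scenario parameters are handed to the simulation through waLBerla member callbacks;
per-run results and build information are appended to ``benchmark.csv``.
"""
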
import os
import sys
from math import prod

import pandas as pd
import waLBerla as wlb

from waLBerla.tools.config import block_decomposition
from waLBerla.tools.sqlitedb import sequenceValuesToScalars


def domain_block_size_ok(block_size, total_mem, gls=1, q_phase=15, q_hydro=27, size_per_value=8):
    """Checks if a single block of given size fits into GPU memory"""

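    # a block is padded with `gls` ghost layers on both sides of every dimension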
    cells = prod(b + 2 * gls for b in block_size)
    # two PDF fields for each lattice (phase field and hydrodynamics), three values for the
    # velocity, plus the phase field and a temporary phase field
    values_per_cell = 2 * q_phase + 2 * q_hydro + 3 + 2
    needed_memory = values_per_cell * cells * size_per_value
    return needed_memory < total_mem


class Scenario:
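    """Parameter set for a single benchmark run, passed to the waLBerla app via member callbacks."""
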
    def __init__(self, time_step_strategy, cuda_block_size, cells_per_block=(256, 256, 256),
                 cuda_enabled_mpi=False):
        # output frequencies
        self.vtkWriteFrequency = 0

        # simulation parameters
        self.timesteps = 101
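
        # domain decomposition: one block per MPI process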
        self.cells_per_block = cells_per_block
        self.blocks = block_decomposition(wlb.mpi.numProcesses())
        self.periodic = (1, 1, 1)
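
        # total domain size in cells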
        self.size = (self.cells_per_block[0] * self.blocks[0],
                     self.cells_per_block[1] * self.blocks[1],
                     self.cells_per_block[2] * self.blocks[2])

        self.timeStepStrategy = time_step_strategy
        self.cuda_block_size = cuda_block_size
        self.warmupSteps = 10

        self.cudaEnabledMpi = cuda_enabled_mpi

        # bubble parameters
        self.bubbleRadius = min(self.size) // 4
        self.bubbleMidPoint = (self.size[0] / 2, self.size[1] / 2, self.size[2] / 2)

        self.scenario = 1  # 1: rising bubble, 2: Rayleigh-Taylor instability (RTI)
        self.config_dict = self.config()

        self.csv_file = "benchmark.csv"

    @wlb.member_callback
    def config(self):
        return {
            'DomainSetup': {
                'blocks': self.blocks,
                'cellsPerBlock': self.cells_per_block,
                'periodic': self.periodic,
            },
            'Parameters': {
                'timesteps': self.timesteps,
                'vtkWriteFrequency': self.vtkWriteFrequency,
                'remainingTimeLoggerFrequency': -1,
                'timeStepStrategy': self.timeStepStrategy,
                'gpuBlockSize': self.cuda_block_size,
                'warmupSteps': self.warmupSteps,
                'scenario': self.scenario,
                'cudaEnabledMpi': self.cudaEnabledMpi
            },
            'Boundaries_GPU': {
                'Border': []
            },
            'Boundaries_CPU': {
                'Border': []
            },
            'Bubble': {
                'bubbleMidPoint': self.bubbleMidPoint,
                'bubbleRadius': self.bubbleRadius,
            },
        }

    @wlb.member_callback
    def results_callback(self, **kwargs):
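        # merge scenario configuration, measured results and build information into one record
        # and append it as a row to the CSV file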
        data = {}
        data.update(self.config_dict['Parameters'])
        data.update(self.config_dict['DomainSetup'])
        data.update(kwargs)
        data['executable'] = sys.argv[0]
        data['compile_flags'] = wlb.build_info.compiler_flags
        data['walberla_version'] = wlb.build_info.version
        data['build_machine'] = wlb.build_info.build_machine
        sequenceValuesToScalars(data)

        df = pd.DataFrame.from_records([data])
        if not os.path.isfile(self.csv_file):
            df.to_csv(self.csv_file, index=False)
        else:
            df.to_csv(self.csv_file, index=False, mode='a', header=False)


def benchmark():
    scenarios = wlb.ScenarioManager()

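    # GPU memory available per device, taken from the GPU_MEMORY_GB environment variable (default: 8)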
    gpu_mem_gb = int(os.environ.get('GPU_MEMORY_GB', 8))
    gpu_mem = gpu_mem_gb * (2 ** 30)

    block_size = (256, 256, 256)

    if not domain_block_size_ok(block_size, gpu_mem):
        wlb.log_info_on_root(f"Block size {block_size} would exceed GPU memory. Skipping.")
    else:
        scenarios.add(Scenario(time_step_strategy='normal', cuda_block_size=(256, 1, 1), cells_per_block=block_size))


def kernel_benchmark():
    scenarios = wlb.ScenarioManager()

    gpu_mem_gb = int(os.environ.get('GPU_MEMORY_GB', 8))
    gpu_mem = gpu_mem_gb * (2 ** 30)

    block_sizes = [(i, i, i) for i in (32, 64, 128, 256, 320, 384, 448, 512)]

    cuda_blocks = [(32, 1, 1), (64, 1, 1), (128, 1, 1), (256, 1, 1),
                   (32, 2, 1), (64, 2, 1), (128, 2, 1),
                   (32, 4, 1), (64, 4, 1),
                   (32, 4, 2),
                   (32, 8, 1),
                   (16, 16, 1)]

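    # sweep all combinations of time step strategy, CUDA launch configuration and cells per block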
    for time_step_strategy in ['phase_only', 'hydro_only', 'kernel_only', 'normal']:
        for cuda_block in cuda_blocks:
            for block_size in block_sizes:
                if not domain_block_size_ok(block_size, gpu_mem):
                    wlb.log_info_on_root(f"Block size {block_size} would exceed GPU memory. Skipping.")
                    continue
                scenario = Scenario(time_step_strategy=time_step_strategy,
                                    cuda_block_size=cuda_block,
                                    cells_per_block=block_size)
                scenarios.add(scenario)


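# run the full kernel sweep by default; switch to benchmark() for the single large-block scenario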
# benchmark()
kernel_benchmark()