Commit 9d5fdeb4 authored by Martin Bauer

lbmpy benchmark

parent fb99632c
@@ -136,12 +136,19 @@ class Database:

     @staticmethod
     def get_environment():
-        return {
+        result = {
             'timestamp': time.mktime(time.gmtime()),
             'hostname': socket.gethostname(),
             'cpuCompilerConfig': get_compiler_config(),
         }
+        try:
+            from git import Repo, InvalidGitRepositoryError
+            repo = Repo(search_parent_directories=True)
+            result['git_hash'] = str(repo.head.commit)
+        except (ImportError, InvalidGitRepositoryError):
+            pass
+        return result

 # ----------------------------------------- Helper Functions -----------------------------------------------------------
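The environment record now degrades gracefully: the git hash is attached only when GitPython is installed and the benchmark runs inside a checkout. A minimal standalone sketch of the same pattern (get_compiler_config() is project-internal and therefore left out here; everything else mirrors the hunk above):

    import socket
    import time

    def get_environment():
        # Metadata identifying a benchmark run.
        result = {
            'timestamp': time.mktime(time.gmtime()),  # numeric timestamp, as in the commit
            'hostname': socket.gethostname(),
        }
        try:
            # Optional: record the HEAD commit if GitPython is available
            # and we are inside a git working tree.
            from git import Repo, InvalidGitRepositoryError
            repo = Repo(search_parent_directories=True)
            result['git_hash'] = str(repo.head.commit)  # hex SHA of HEAD
        except (ImportError, InvalidGitRepositoryError):
            pass  # fall back to the bare record
        return result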
@@ -56,21 +56,36 @@ class TimeLoop:

         time_for_one_iteration = (end - start) / time_steps
         return time_for_one_iteration

-    def benchmark(self, time_for_benchmark=5, init_time_steps=10, number_of_time_steps_for_estimation=20):
-        """
-        Returns the time in seconds for one time step
-
-        :param time_for_benchmark: number of seconds benchmark should take
-        :param init_time_steps: number of time steps run initially for warm up, to get arrays into cache etc
-        :param number_of_time_steps_for_estimation: time steps run before real benchmarks, to determine number of time
-                                                    steps that approximately take 'time_for_benchmark'
-        """
+    def run_time_span(self, seconds):
+        iterations = 0
+        self.pre_run()
+        start = time.perf_counter()
+        while time.perf_counter() < start + seconds:
+            self.time_step()
+            iterations += 1
+        end = time.perf_counter()
+        self.post_run()
+        return iterations, end - start
+
+    def benchmark(self, time_for_benchmark=5, init_time_steps=2, number_of_time_steps_for_estimation='auto'):
+        """Returns the time in seconds for one time step.
+
+        Args:
+            time_for_benchmark: number of seconds the benchmark should take
+            init_time_steps: number of time steps run initially for warm-up, to get arrays into cache etc.
+            number_of_time_steps_for_estimation: time steps run before the real benchmark, to determine the
+                                                 number of time steps that approximately take
+                                                 'time_for_benchmark', or 'auto'
+        """
         # Run a few time steps to get a first estimate
-        duration_of_time_step = self.benchmark_run(number_of_time_steps_for_estimation, init_time_steps)
+        if number_of_time_steps_for_estimation == 'auto':
+            iterations, total_time = self.run_time_span(0.5)
+            duration_of_time_step = total_time / iterations
+        else:
+            duration_of_time_step = self.benchmark_run(number_of_time_steps_for_estimation, init_time_steps)

         # Run for approximately 'time_for_benchmark' seconds
         time_steps = int(time_for_benchmark / duration_of_time_step)
-        time_steps = max(time_steps, 20)
+        time_steps = max(time_steps, 4)
         return self.benchmark_run(time_steps, init_time_steps)

     def pre_run(self):
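With the new 'auto' default, benchmark() first measures for half a second via run_time_span() to estimate the per-step duration, then sizes the real run so it lasts roughly time_for_benchmark seconds (but at least 4 steps). A hedged usage sketch; how a TimeLoop instance is constructed is not part of this commit, so time_loop is assumed to exist already:

    # time_loop: an already configured TimeLoop instance (assumption)
    time_per_step = time_loop.benchmark(time_for_benchmark=5)  # step count auto-estimated
    print('%.3f ms per time step' % (time_per_step * 1e3))

    # The fixed-duration primitive is also usable on its own:
    iterations, elapsed = time_loop.run_time_span(2)
    print('%.1f steps/s over %.2f s' % (iterations / elapsed, elapsed))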