Skip to content
Snippets Groups Projects
Commit 9ef3884b authored by Christoph Alt's avatar Christoph Alt
Browse files

Merge branch 'razvan/PercolationGPU' into 'main'

Adding Dashboards for Percolation and for the roofline dashboard

See merge request !11
parents 29d5c6f1 6b06020e
Branches update-codegen
2 merge requests!11Adding Dashboards for Percolation and for the roofline dashboard,!10Just Bandwidth panel is implemented properly
Pipeline #68980 passed with stages
in 1 minute and 22 seconds
......@@ -12,11 +12,14 @@ from grafanalib.influxdb import InfluxDBTarget
from dashboards.annotations import Annotation
from dashboards.influx_queries import Query
from dashboards.legends import AxisLabel, Units
def get_influx_target(target_query: str, result_format: str = 'time_series', alias: str = "", **kwargs) -> InfluxDBTarget:
    """Build an InfluxDBTarget for a panel.

    Args:
        target_query: raw InfluxQL query string.
        result_format: Grafana result format ('time_series' or 'table').
        alias: display alias for the resulting series.
        **kwargs: extra InfluxDBTarget options; entries whose value is None
            are dropped so grafanalib defaults stay in effect.
    """
    return InfluxDBTarget(
        query=target_query,
        format=result_format,
        alias=alias,
        # Drop None values so they don't override grafanalib defaults.
        **{k: v for k, v in kwargs.items() if v is not None},
    )
......@@ -76,11 +79,11 @@ def build_row_repeat_dashboard(
dataSource: str,
measurment_name: str,
panel_query: Query,
unit: str,
axisLabel: str,
unit: Units,
axisLabel: AxisLabel,
other_vars: List[Template] = None,
annotations: Annotations = Annotations(),
alias: str = None,
alias: str = '',
) -> Dashboard:
"""Build a Dashboard that takes one query and repeats that with 2 variables."""
time_series_kwargs = {
......@@ -88,8 +91,8 @@ def build_row_repeat_dashboard(
'dataSource': dataSource,
'targets': [get_influx_target(str(panel_query), alias=alias)],
'repeat': Repeat('h', panel_repeat_var.name),
'unit': unit,
'axisLabel': axisLabel,
'unit': unit.value,
'axisLabel': axisLabel.value,
'pointSize': 9,
}
if other_vars is None:
......
......@@ -6,12 +6,14 @@ from dashboards.dashboard_base import (DashboardOptions,
from dashboards.overrides import get_line_style_regex_override, get_color_regex_override
from dashboards.panels import PanelInfos, get_time_series_panel, get_text_panel
from dashboards.panels import PanelInfos, get_time_series_panel, get_text_panel, is_regex
from dashboards.variables import get_dashboard_variable, Filter
from dashboards.influx_queries import join_variable_and
from dashboards.legends import Units
from dashboards.influx_queries import Query
description_markdown = r"""
- Linestyle indicates the compiler:
......@@ -102,9 +104,13 @@ def dashboard_fe2ti():
get_time_series_panel(
field,
data_source,
measurment_name,
where=where,
group_by=[f.name for f in filters],
[Query(select_=field.name,
from_=measurment_name,
where_=where,
group_by=[f.name for f in filters],
from_string=not is_regex(measurment_name),
select_string=not is_regex(field.name))
],
overrides=overrides,
)
for field in fields]
......
......@@ -5,7 +5,7 @@ from dashboards.dashboard_base import (DashboardOptions,
Repeat,)
from dashboards.panels import PanelInfos, get_time_series_panel
from dashboards.panels import PanelInfos, get_time_series_panel, is_regex
from dashboards.variables import get_dashboard_variable, Filter, get_measurement_filter
from dashboards.influx_queries import join_variable_and
......@@ -14,6 +14,7 @@ from dashboards.legends import Units
import cbutil.ncu_keys as ncu_keys
import cbutil.likwid_keys as likwid_keys
from dashboards.influx_queries import Query
INTEL_LINESTYLE = "solid"
GCC_LINESTYLE = "dashed"
......@@ -64,9 +65,12 @@ def dashboard_pystencils_cpu():
get_time_series_panel(
field,
data_source,
f"/^${benchmark.name}$/",
where=where,
group_by=group_by,
[Query(select_=field.name,
from_=f"/^${benchmark.name}$/",
where_=where,
group_by=group_by,
from_string=not is_regex(f"/^${benchmark.name}$/"),
select_string=not is_regex(field.name))],
)
for field in fields]
......@@ -125,9 +129,12 @@ def dashboard_pystencils_gpu():
get_time_series_panel(
field,
data_source,
f"/^${benchmark.name}$/",
where=where,
group_by=group_by,
[Query(select_=field.name,
from_=f"/^${benchmark.name}$/",
where_=where,
group_by=group_by,
from_string=not is_regex(f"/^${benchmark.name}$/"),
select_string=not is_regex(field.name))],
)
for field in fields]
......
This diff is collapsed.
......@@ -5,7 +5,7 @@ import dashboards.dashboard_list as boards
from dashboards.upload import upload_dashboard
from dashboards.dashboard_fe2ti import dashboard_fe2ti
from dashboard_pystencils import dashboard_pystencils_cpu, dashboard_pystencils_gpu
from dashboard_walberla import dashboard_uniformgridcpu, dashboard_uniformgridgpu, dashboard_uniformgridgpu_profile, dashboard_fslbmgravitywave
from dashboard_walberla import dashboard_uniformgridcpu, dashboard_uniformgridgpu, dashboard_uniformgridgpu_profile, dashboard_fslbmgravitywave, dashboard_uniformgridcpu_relativeperformance, dashboard_percolationgpu
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
......@@ -46,6 +46,8 @@ def main():
upload_dashboard(dashboard_uniformgridgpu(), folder=walberla_folder)
upload_dashboard(dashboard_uniformgridgpu_profile(), folder=walberla_folder)
upload_dashboard(dashboard_fslbmgravitywave(), folder=walberla_folder)
upload_dashboard(dashboard_uniformgridcpu_relativeperformance(), folder=walberla_folder)
upload_dashboard(dashboard_percolationgpu(), folder=walberla_folder)
else:
board = getattr(boards, board_name)
upload_dashboard(board(), folder=walberla_folder)
......
......@@ -6,8 +6,8 @@ from typing import List, Union
@dataclass
class Query:
select_: str
from_: str
where_: str
from_: Union[str, 'Query']
where_: str = None
group_by: List[str] = field(default_factory=list)
select_string: bool = True
from_string: bool = True
......@@ -19,17 +19,22 @@ class Query:
return f"SELECT {self.select_} "
def _get_from(self):
    """Render the FROM clause of this query.

    Subqueries are wrapped in parentheses; plain measurement names are
    double-quoted unless from_string is False (used for regex measurements).
    """
    if isinstance(self.from_, Query):
        return f'FROM ({self.from_}) '
    elif self.from_string:
        return f'FROM "{self.from_}" '
    else:
        return f"FROM {self.from_} "
def _get_where(self):
    """Render the WHERE clause; empty string when no condition is set."""
    # where_ is optional (defaults to None) — omit the clause entirely then.
    if self.where_:
        return f"WHERE ({self.where_}) "
    else:
        return ""
def _get_group_by(self):
    """Render the GROUP BY clause; empty string when no tags are given."""
    if self.group_by:
        # Tags already qualified with ::tag are passed through verbatim;
        # everything else is double-quoted for InfluxQL.
        group_by = ", ".join(tag if tag.endswith("::tag") else f'"{tag}"' for tag in self.group_by)
        return f"GROUP BY {group_by}"
    else:
        return ""
......@@ -52,12 +57,34 @@ def show_tag_values(table: str, key_name: str) -> str:
return f'{base} {from_part}WITH key = "{key_name}"'
def get_tag_values(table: str, key_name: str, *, inner_field_key="", where="",
                   group_by=None) -> str:
    """Return an InfluxQL query listing the distinct values of a tag key.

    Args:
        table: measurement to read from.
        key_name: tag key whose distinct values are requested.
        inner_field_key: optional field key restricting the inner SELECT.
        where: optional WHERE condition; defaults to Grafana's $timeFilter.
        group_by: tags for the outer GROUP BY; defaults to [key_name].
    """
    if group_by is None:
        group_by = [key_name]
    where_clause = "$timeFilter"
    if where:
        where_clause = where
    inner_select = "*"
    if inner_field_key != "":
        inner_select = f'"{inner_field_key}", "{key_name}"'
    inner_query = f'SELECT {inner_select} FROM "{table}" WHERE {where_clause}'
    group_by_clause = ", ".join(group_by)
    return f'SELECT distinct("{key_name}") FROM ({inner_query}) GROUP BY {group_by_clause}'
def show_field_keys(table: str) -> str:
    """Return the InfluxQL query listing all field keys of a measurement.

    An empty *table* yields the global 'SHOW FIELD KEYS' query.
    """
    base = "SHOW FIELD KEYS"
    from_part = ""
    if table != "":
        from_part = f'from "{table}" '
    return f'{base} {from_part}'
def get_variable_condition(variable_name: str, *, tag_key: str = None) -> str:
......@@ -71,7 +98,51 @@ def get_variable_condition(variable_name: str, *, tag_key: str = None) -> str:
return f'"{clean_lhs}" =~ /^${{{clean_rhs}:regex}}$/'
def join_conditions(conditions: List[str], operators: Union[List[str], str]):
def get_variable_condition_unbounded(variable_name: str, *, tag_key: str = None) -> str:
    """Regex condition on a dashboard variable without ^/$ anchors.

    Raises:
        ValueError: if variable_name is empty or only whitespace.
    """
    clean_rhs = variable_name.strip()
    if tag_key:
        clean_lhs = tag_key.strip()
    else:
        clean_lhs = clean_rhs
    if not clean_rhs:
        raise ValueError("Empty variable name")
    return f'"{clean_lhs}" =~ /${{{clean_rhs}:regex}}/'
def get_variable_condition_with_tag(variable_name: str, *, tag_key: str = None) -> str:
    """Anchored condition comparing a ::tag-qualified key against a variable.

    Note: uses the plain $var form, not ${var:regex}.

    Raises:
        ValueError: if variable_name is empty or only whitespace.
    """
    clean_rhs = variable_name.strip()
    if tag_key:
        clean_lhs = tag_key.strip()
    else:
        clean_lhs = clean_rhs
    if not clean_rhs:
        raise ValueError("Empty variable name")
    return f'"{clean_lhs}"::tag =~ /^${clean_rhs}$/'
def get_variable_condition_without_regex(variable_name: str, *, tag_key: str = None) -> str:
    """Anchored condition using the plain $var form instead of ${var:regex}.

    Raises:
        ValueError: if variable_name is empty or only whitespace.
    """
    clean_rhs = variable_name.strip()
    if tag_key:
        clean_lhs = tag_key.strip()
    else:
        clean_lhs = clean_rhs
    if not clean_rhs:
        raise ValueError("Empty variable name")
    return f'"{clean_lhs}" =~ /^${clean_rhs}$/'
def get_variable_tag(variable_name: str, *, tag_key: str = None) -> str:
    """Return the ::tag-qualified column reference for a variable or tag key.

    Raises:
        ValueError: if variable_name is empty or only whitespace.
    """
    clean_rhs = variable_name.strip()
    if tag_key:
        clean_lhs = tag_key.strip()
    else:
        clean_lhs = clean_rhs
    if not clean_rhs:
        raise ValueError("Empty variable name")
    return f'"{clean_lhs}"::tag'
def join_conditions(conditions: List[str], operators: Union[List[str], str], include_time_filter: bool = False):
ops = operators
if isinstance(operators, str):
ops = repeat(operators, len(conditions) - 1)
......@@ -84,10 +155,14 @@ def join_conditions(conditions: List[str], operators: Union[List[str], str]):
ret = conditions[0]
for op, cond in zip(ops, conditions[1:]):
ret += f" {op} {cond}"
if include_time_filter:
ret += ' AND $timeFilter'
return ret
def join_variable_and(variable_names: List[str], include_time_filter: bool = False) -> str:
    """AND-join the regex conditions for the given dashboard variables.

    Args:
        variable_names: variable names turned into =~ conditions.
        include_time_filter: when True, append Grafana's $timeFilter.
    """
    return join_conditions(
        [get_variable_condition(name) for name in variable_names], "AND", include_time_filter
    )
......@@ -14,6 +14,8 @@ class Units(str, Enum):
flop_per_byte = 'Flop/Byte'
mflop_sec = 'mflops'
percent = 'percentunit'
mlups = 'MLUP/s'
none = ''
class AxisLabel(str, Enum):
......
from dataclasses import dataclass, field
# from collections import namedtuple
from dashboards.influx_queries import Query
from grafanalib.core import TimeSeries, Text, Stat, Template, Repeat, Threshold
from grafanalib.core import (
TimeSeries,
Text,
Stat,
Template,
Repeat,
Threshold,
Table,
BarChart,
PieChartv2,
)
from dashboards.dashboard_base import get_influx_target
from dashboards.legends import Units
from numbers import Number
from typing import List
# PanelInfos = namedtuple("PanelInfos", ("name", "unit"))
......@@ -12,7 +24,7 @@ from numbers import Number
@dataclass
class PanelInfos:
    """Display name, unit and optional threshold describing one panel."""

    # Panel title; also the field name selected by the panel's query.
    name: str
    # Grafana unit; defaults to no unit.
    unit: Units = field(default=Units.none)
    # Optional absolute threshold drawn on the panel (None = no threshold).
    absthreshold: Number = field(default=None)
......@@ -20,76 +32,252 @@ def is_regex(name):
return name[0] == "/" and name[-1] == "/"
def get_time_series_panel(
    panel_infos: PanelInfos,
    data_source: str,
    query_list: List[Query],
    *,
    overrides=None,
    pointSize: int = 9,
    **kwargs,
):
    """Build a grafanalib TimeSeries panel from a list of influx queries.

    Args:
        panel_infos: panel title/unit and optional absolute threshold.
        data_source: name of the Grafana data source.
        query_list: queries rendered into one target each.
        overrides: per-series field overrides.
        pointSize: point size of the drawn series.
        **kwargs: forwarded to grafanalib's TimeSeries.
    """
    targets = [get_influx_target(str(query)) for query in query_list]
    new_kwargs = {**kwargs}
    if panel_infos.absthreshold is not None:
        threshold_kwargs = {
            "thresholdType": "absolute",
            "thresholds": [
                Threshold("green", 0, 0.0),
                Threshold(
                    "red",
                    index=1,
                    value=float(panel_infos.absthreshold),
                    op="lt",
                ),
            ],
        }
        # Only force the 'line' style when the caller did not choose one.
        if "thresholdsStyleMode" not in new_kwargs:
            threshold_kwargs["thresholdsStyleMode"] = "line"
        new_kwargs.update(threshold_kwargs)
    return TimeSeries(
        title=panel_infos.name,
        dataSource=data_source,
        targets=targets,
        unit=panel_infos.unit,
        pointSize=pointSize,
        overrides=overrides,
        **new_kwargs,
    )
def get_text_panel(content: str, *, mode="markdown", **kwargs) -> Text:
    """Build a grafanalib Text panel showing *content* in the given mode."""
    return Text(content=content, mode=mode, **kwargs)
def get_stat_panel(
    title: str,
    dataSource: str,
    stat_query: Query,
    repeat: Template = None,
    alias: str = "",
    *,
    maxPerRow=0,
    **kwargs,
):
    """Build a grafanalib Stat panel for a single query.

    Args:
        title: panel title.
        dataSource: name of the Grafana data source.
        stat_query: query rendered into the panel's single target.
        repeat: optional template variable the panel repeats over.
        alias: display alias for the target.
        maxPerRow: maximum repeated panels per row (0 = grafanalib default).
        **kwargs: forwarded to grafanalib's Stat; may override the defaults.
    """
    new_kwargs = {
        "alignment": "center",
        "colorMode": "value",
        "graphMode": "area",
        "reduceCalc": "last",
        "orientation": "auto",
        "transparent": True,
    }
    # Caller-supplied kwargs win over the defaults above.
    new_kwargs.update(kwargs)
    if repeat:
        rep_args = ["h", repeat.name]
        if maxPerRow:
            rep_args.append(maxPerRow)
        new_kwargs.setdefault("repeat", Repeat(*rep_args))
    return Stat(
        title=title,
        dataSource=dataSource,
        targets=[get_influx_target(str(stat_query), alias=alias)],
        thresholdType="percentage",
        thresholds=[
            Threshold("green", 0, 0.0),
            Threshold("yellow", 1, 50.0),
            Threshold("red", 2, 80.0),
        ],
        **new_kwargs,
    )
def get_table_panel(
    panel_infos: PanelInfos,
    data_source: str,
    query_list: List[Query],
    *,
    result_format_list: List[str] = None,
    alias_list: List[str] = None,
    transformations=None,
    overrides=None,
    **kwargs,
):
    """Build a grafanalib Table panel fed by one target per query.

    Args:
        panel_infos: panel title/unit and optional absolute threshold.
        data_source: name of the Grafana data source.
        query_list: influx queries, one target each.
        result_format_list: per-query result format; defaults to 'table'.
        alias_list: per-query alias; defaults to "".
        transformations: Grafana transformations applied to the panel.
        overrides: per-field overrides.
        **kwargs: forwarded to grafanalib's Table.
    """
    # None sentinel instead of a shared mutable default argument.
    if transformations is None:
        transformations = []
    if not alias_list:
        alias_list = [""] * len(query_list)
    if not result_format_list:
        result_format_list = ["table"] * len(query_list)
    targets = [
        get_influx_target(str(query), result_format=result_format, alias=alias)
        for query, result_format, alias in zip(
            query_list, result_format_list, alias_list
        )
    ]
    new_kwargs = {**kwargs}
    if panel_infos.absthreshold is not None:
        new_kwargs.update(
            {
                "thresholdType": "absolute",
                "thresholds": [
                    Threshold("green", 0, 0.0),
                    Threshold(
                        "red", index=1, value=float(panel_infos.absthreshold), op="lt"
                    ),
                ],
            }
        )
    return Table(
        title=panel_infos.name,
        dataSource=data_source,
        targets=targets,
        transformations=transformations,
        unit=panel_infos.unit,
        overrides=overrides,
        **new_kwargs,
    )
def get_bar_chart_panel(
    panel_infos: PanelInfos,
    data_source: str,
    query_list: List[Query],
    *,
    result_format_list: List[str] = None,
    alias_list: List[str] = None,
    transformations=None,
    overrides=None,
    **kwargs,
):
    """Build a grafanalib BarChart panel fed by one target per query.

    Args:
        panel_infos: panel title/unit and optional absolute threshold.
        data_source: name of the Grafana data source.
        query_list: influx queries, one target each.
        result_format_list: per-query result format; defaults to 'table'.
        alias_list: per-query alias; defaults to "".
        transformations: Grafana transformations applied to the panel.
        overrides: accepted for symmetry with the other panel builders.
        **kwargs: forwarded to grafanalib's BarChart.
    """
    # NOTE(review): 'overrides' is accepted but never forwarded to BarChart —
    # confirm whether that is intentional.
    # None sentinel instead of a shared mutable default argument.
    if transformations is None:
        transformations = []
    if not alias_list:
        alias_list = [""] * len(query_list)
    if not result_format_list:
        result_format_list = ["table"] * len(query_list)
    targets = [
        get_influx_target(str(query), result_format=result_format, alias=alias)
        for query, result_format, alias in zip(
            query_list, result_format_list, alias_list
        )
    ]
    new_kwargs = {**kwargs}
    if panel_infos.absthreshold is not None:
        new_kwargs.update(
            {
                "thresholdType": "absolute",
                "thresholds": [
                    Threshold("green", 0, 0.0),
                    Threshold(
                        "red", index=1, value=float(panel_infos.absthreshold), op="lt"
                    ),
                ],
            }
        )
    # Pin the value axis to [0, 1] and set the unit via raw panel JSON.
    extraJson = {
        "fieldConfig": {
            "defaults": {"fieldMinMax": True, "max": 1, "unit": panel_infos.unit}
        }
    }
    return BarChart(
        title=panel_infos.name,
        dataSource=data_source,
        targets=targets,
        transformations=transformations,
        xTickLabelRotation=-45,
        extraJson=extraJson,
        **new_kwargs,
    )
def get_pie_chart_panel(
    panel_infos: PanelInfos,
    data_source: str,
    query_list: List[Query],
    *,
    result_format_list: List[str] = None,
    alias_list: List[str] = None,
    transformations=None,
    overrides=None,
    **kwargs,
):
    """Build a grafanalib PieChartv2 panel from a list of influx queries.

    Args:
        panel_infos: panel title/unit and optional absolute threshold.
        data_source: name of the Grafana data source.
        query_list: influx queries, one target each.
        result_format_list: accepted for symmetry with the other builders.
        alias_list: accepted for symmetry with the other builders.
        transformations: Grafana transformations applied to the panel.
        overrides: per-field overrides.
        **kwargs: forwarded to grafanalib's PieChartv2.
    """
    # NOTE(review): result_format_list/alias_list are accepted but unused here
    # (targets are built without them) — confirm whether that is intentional.
    # None sentinels instead of shared mutable default arguments.
    if transformations is None:
        transformations = []
    if overrides is None:
        overrides = []
    targets = [get_influx_target(str(query)) for query in query_list]
    new_kwargs = {**kwargs}
    if panel_infos.absthreshold is not None:
        threshold_kwargs = {
            "thresholdType": "absolute",
            "thresholds": [
                Threshold("green", 0, 0.0),
                Threshold(
                    "red",
                    index=1,
                    value=float(panel_infos.absthreshold),
                    op="lt",
                ),
            ],
        }
        # Only force the 'line' style when the caller did not choose one.
        if "thresholdsStyleMode" not in new_kwargs:
            threshold_kwargs["thresholdsStyleMode"] = "line"
        new_kwargs.update(threshold_kwargs)
    return PieChartv2(
        title=panel_infos.name,
        dataSource=data_source,
        targets=targets,
        transformations=transformations,
        unit=panel_infos.unit,
        overrides=overrides,
        **new_kwargs,
    )
from grafanalib.core import Template
from collections import namedtuple
from dashboards.influx_queries import show_tag_values, get_tag_values
from dashboards.influx_queries import show_tag_values, get_tag_values, show_field_keys
from typing import List
"""
multi: maps to the multi_value option
refresh: maps to the grafanalib.Template refresh 1= on dashboard load 2= on timer range change
"""
# Dashboard filter definition:
#   name          - tag/variable name the filter operates on
#   multi         - maps to the Template multi/includeAll options
#   default_value - preselected value, if any
#   refresh       - Template refresh mode (1 = on dashboard load,
#                   2 = on time range change)
Filter = namedtuple(
    "Filter", ("name", "multi", "default_value", "refresh"), defaults=("", True, "", 1)
)
......@@ -33,10 +38,28 @@ def get_time_dependend_dashboard_variable(
data_source: str,
*,
inner_field_key: str = "",
where: str = "",
group_by: List[str] = None
):
query = get_tag_values(
measurment_name, filter.name, inner_field_key=inner_field_key
measurment_name,
filter.name,
inner_field_key=inner_field_key,
where=where,
group_by=group_by
)
kwargs = {
"includeAll": filter.multi,
"multi": filter.multi,
"refresh": filter.refresh
}
if filter.default_value:
kwargs.update({"default": filter.default_value})
return get_dashboard_variable_query(filter.name, query, data_source, **kwargs)
def get_field_keys_dashboard_variable(filter: Filter, measurment_name: str, data_source: str):
query = show_field_keys(measurment_name)
kwargs = {
"includeAll": filter.multi,
"multi": filter.multi,
......
......@@ -7,7 +7,7 @@ from dashboards.dashboard_base import (get_commit_annotation,
from dashboards.dashboard_list import dashboard_uniformGridGPU
from dashboards.dashboard_fe2ti import dashboard_fe2ti
from dashboards.dashboard_pystencils import dashboard_pystencils_cpu, dashboard_pystencils_gpu
from dashboards.dashboard_walberla import dashboard_uniformgridgpu, dashboard_uniformgridcpu
from dashboards.dashboard_walberla import dashboard_uniformgridgpu, dashboard_uniformgridcpu, dashboard_percolationgpu, dashboard_fslbmgravitywave, dashboard_uniformgridgpu_profile, dashboard_uniformgridcpu_relativeperformance
from dashboards.influx_queries import Query, show_tag_values
dataSource = 'InfluxDB-1'
......@@ -96,3 +96,7 @@ def test_dashboard_pystencils_cpu():
def test_dashboard_walberla():
    """Smoke test: every walberla dashboard builder constructs without raising."""
    dashboard_uniformgridcpu()
    dashboard_uniformgridgpu()
    dashboard_uniformgridgpu_profile()
    dashboard_percolationgpu()
    dashboard_uniformgridcpu_relativeperformance()
    dashboard_fslbmgravitywave()
# Test case using pytest
import pytest
from dashboards.upload import load_config_from_env
from unittest.mock import patch
import os
from dashboards.upload import load_config_from_env # Replace with the correct path to your function
def test_load_config_from_env():
# Case 1: Test if function raises exception for missing GRAFANA_API_KEY
with pytest.raises(ValueError) as e:
load_config_from_env(env_path="")
assert str(
e.value) == "GRAFANA_API_KEY is None or not defined in the .env file"
# Case 2: Test if function raises exception for missing GRAFANA_SERVER
with patch.dict(os.environ, {"GRAFANA_API_KEY": "api_key"}):
with pytest.raises(ValueError) as e:
load_config_from_env(env_path="")
assert str(
e.value) == "GRAFANA_SERVER is None or not defined in the .env file"
# Test when GRAFANA_API_KEY is missing
@patch('dotenv.load_dotenv')
@patch('os.getenv')
def test_missing_grafana_api_key(mock_getenv, mock_load_dotenv):
# Setup mock return values with missing GRAFANA_API_KEY
mock_getenv.side_effect = lambda key: {
'GRAFANA_SERVER': 'http://test.server'
}.get(key)
# Case 3: Test if function returns expected values when both variables are defined
with patch.dict(
os.environ, {"GRAFANA_API_KEY": "api_key",
"GRAFANA_SERVER": "server_url"}
):
result = load_config_from_env(env_path=".env")
assert result == ("server_url", "api_key")
# Test that ValueError is raised for missing GRAFANA_API_KEY
with pytest.raises(ValueError, match="GRAFANA_API_KEY is None or not defined in the .env file"):
load_config_from_env()
# Test when GRAFANA_SERVER is missing
@patch('dotenv.load_dotenv')
@patch('os.getenv')
def test_missing_grafana_server(mock_getenv, mock_load_dotenv):
    """load_config_from_env raises ValueError when GRAFANA_SERVER is unset."""
    # Environment exposes only the API key; GRAFANA_SERVER resolves to None.
    mock_getenv.side_effect = lambda key: {
        'GRAFANA_API_KEY': 'test_api_key'
    }.get(key)
    with pytest.raises(ValueError, match="GRAFANA_SERVER is None or not defined in the .env file"):
        load_config_from_env()
# Test when the .env file does not exist
@patch('dotenv.load_dotenv')
def test_no_env_file(mock_load_dotenv):
    """A missing .env file behaves like a missing GRAFANA_API_KEY."""
    # Simulate that the .env file does not exist.
    with patch('os.path.exists', return_value=False):
        with pytest.raises(ValueError, match="GRAFANA_API_KEY is None or not defined in the .env file"):
            load_config_from_env()
......@@ -12,7 +12,7 @@ def test_query():
q = Query(
select_="mlupsPerProcess",
from_="UniformGridGPU",
where_='"host" =~ /^${host:regex}$/ AND "collisionSetup" =~ /^${collisionSetup:regex}$/',
where_='"host" =~ /^${host:regex}$/ AND "collisionSetup" =~ /^${collisionSetup:regex}$/ AND $timeFilter ',
group_by=[
"blocks_0",
"blocks_1",
......@@ -32,7 +32,7 @@ def test_query():
q1 = (
'SELECT "mlupsPerProcess" '
'FROM "UniformGridGPU" '
'WHERE ("host" =~ /^${host:regex}$/ AND "collisionSetup" =~ /^${collisionSetup:regex}$/) AND $timeFilter '
'WHERE ("host" =~ /^${host:regex}$/ AND "collisionSetup" =~ /^${collisionSetup:regex}$/ AND $timeFilter ) '
'GROUP BY "blocks_0", "blocks_1", "blocks_2", '
'"cellsPerBlock_0", "cellsPerBlock_1", "cellsPerBlock_2", '
'"gpuBlockSize_0", "gpuBlockSize_1", "gpuBlockSize_2", '
......
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment