diff --git a/.gitignore b/.gitignore
index dccae81bc99d30399b830424bb81769623cbb15d..c4f7dbd9d235f1473eada09cadf4f659cf42d668 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@ __pycache__/
 *.swp
 *.eggs
 *.egg-info/
+.env
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b5f711695da86750bb79b766f5d5c9188b7f0ab5..095a303f687d0214d9528c4845ac8087c108dcf8 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,6 @@
 stages:
   - test
+  - deploy
 
 test:
    stage: test
@@ -11,3 +12,12 @@ test:
       - pip install --user .
       - python -m pytest
 
+deploy_dashboards:
+  stage: deploy
+  tags:
+    - docker
+  image: python:3.8
+  script:
+    - pip install --user .
+    - python3 dashboards/deploy.py
+  when: manual
diff --git a/dashboards/__init__.py b/dashboards/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dashboards/dashboard_base.py b/dashboards/dashboard_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8802413a1dd328cd72a00efecd515f3b8e9fdd6
--- /dev/null
+++ b/dashboards/dashboard_base.py
@@ -0,0 +1,70 @@
+from dataclasses import asdict, dataclass
+from typing import List, Optional
+
+from grafanalib.core import (Dashboard, Repeat, RowPanel, Template, Templating,
+                             Time, TimeSeries)
+from grafanalib.influxdb import InfluxDBTarget
+
+from dashboards.influx_queries import Query
+
+
+def get_influx_target(target_query: str) -> InfluxDBTarget:
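+    """Wrap a raw InfluxQL query string in an InfluxDBTarget."""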
+    return InfluxDBTarget(
+        query=target_query
+    )
+
+
+def get_dashboard_variable_query(name: str, template_query: str, dataSource: str, **kwargs) -> Template:
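+    """Create a multi-value, query-backed Grafana template variable."""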
+    return Template(
+        name,
+        template_query,
+        dataSource=dataSource,
+        type="query",
+        includeAll=True,
+        multi=True,
+        **kwargs
+    )
+
+
+@dataclass
+class DashboardOptions:
+    title: str
+    description: str
+    tags: List[str]
+    timezone: str = "browser"
+
+
+def build_row_repeat_dashboard(options: DashboardOptions,
+                               row_repeat_var: Template,
+                               panel_repeat_var: Template,
+                               dataSource: str,
+                               measurment_name: str,
+                               panel_query: Query,
+                               unit: str,
+                               other_vars: Optional[List[Template]] = None,
+                               ) -> Dashboard:
+    """Build a Dashboard that takes one query and repeats that with 2 variables."""
+    dashboard = Dashboard(
+        **asdict(options),
+        rows=[
+            RowPanel(
+                title=f'{row_repeat_var.name}: ${row_repeat_var.name}',
+                collapsed=True,
+                panels=[
+                    TimeSeries(
+                        title=f'{panel_repeat_var.name}: ${panel_repeat_var.name}',
+                        dataSource=dataSource,
+                        targets=[
+                            get_influx_target(str(panel_query))
+                        ],
+                        repeat=Repeat(direction='h', variable=panel_repeat_var.name),
+                        unit=unit,
+                    ),
+                ],
+                repeat=Repeat(direction='v', variable=row_repeat_var.name),
+            ),
+        ],
+        templating=Templating([panel_repeat_var, row_repeat_var, *(other_vars or [])]),
+        time=Time('now-7d', 'now'),
+    )
+    return dashboard.auto_panel_ids()
diff --git a/dashboards/dashboard_list.py b/dashboards/dashboard_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2e06962a6721297c52124c13e9c59099742084f
--- /dev/null
+++ b/dashboards/dashboard_list.py
@@ -0,0 +1,180 @@
+from typing import List
+
+from dashboards.dashboard_base import (DashboardOptions,
+                                       build_row_repeat_dashboard,
+                                       get_dashboard_variable_query)
+from dashboards.influx_queries import Query, join_variable_and, show_tag_values
+
+
+def _uniform_grid(arch: str, group_by: List[str]):
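+    """Build the Uniform Grid benchmark dashboard for one architecture ("CPU" or "GPU")."""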
+    dataSource = 'InfluxDB-1'
+    measurment_name = f'UniformGrid{arch}'
+    row_repeat = "collisionSetup"
+    panel_repeat = "host"
+    unit = 'MLUPs per Process'
+
+    row_repeat_var = get_dashboard_variable_query(row_repeat,
+                                                  show_tag_values(measurment_name, row_repeat),
+                                                  dataSource)
+    panel_repeat_var = get_dashboard_variable_query(panel_repeat,
+                                                    show_tag_values(measurment_name, panel_repeat),
+                                                    dataSource)
+    other_filter = 'cellsPerBlock_0'
+    cellsPerBlock_var = get_dashboard_variable_query(other_filter,
+                                                     show_tag_values(measurment_name, other_filter),
+                                                     dataSource
+                                                     )
+    where = join_variable_and([row_repeat, panel_repeat, other_filter])
+    query = Query(
+        select_='mlupsPerProcess',
+        from_=measurment_name,
+        where_=where,
+        group_by=group_by
+    )
+    options = DashboardOptions(
+        title=f'Uniform Grid {arch}',
+        description=f"Benchmark dashboard for the Uniform Grid {arch} Benchmark from walberla",
+        tags=[
+            arch,
+            'benchmark',
+            'walberla',
+            'Uniform Grid'
+        ],
+        timezone="browser",
+    )
+    return build_row_repeat_dashboard(options,
+                                      row_repeat_var,
+                                      panel_repeat_var,
+                                      dataSource,
+                                      measurment_name,
+                                      query,
+                                      unit,
+                                      [cellsPerBlock_var]
+                                      )
+
+
+def dashboard_uniformGridGPU():
+    group_by = ["blocks_0",
+                "blocks_1",
+                "blocks_2",
+                "cellsPerBlock_0",
+                "cellsPerBlock_1",
+                "cellsPerBlock_2",
+                "gpuBlockSize_0",
+                "gpuBlockSize_1",
+                "gpuBlockSize_2",
+                "collisionSetup",
+                "stencil",
+                "streamingPattern",
+                ]
+
+    return _uniform_grid("GPU", group_by)
+
+
+def dashboard_uniformGridCPU():
+    group_by = ["blocks_0",
+                "blocks_1",
+                "blocks_2",
+                "cellsPerBlock_0",
+                "cellsPerBlock_1",
+                "cellsPerBlock_2",
+                "periodic_0",
+                "periodic_1",
+                "periodic_2",
+                "collisionSetup",
+                "mpi_num_processes",
+                "streamingPattern",
+                "timeStepStrategy",
+                "stencil",
+                ]
+
+    return _uniform_grid("CPU", group_by)
+
+
+def dashboard_granular_gas():
+    dataSource = 'InfluxDB-1'
+    measurment_name = 'MESA_PD_KernelBenchmark'
+    row_repeat = "kernel"
+    panel_repeat = "host"
+    unit = 'ms'
+
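+    # Unlike the other dashboards the row variable iterates over field keys (the
+    # individual kernels), so the selected field itself is templated as $kernel below.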
+    row_repeat_var = get_dashboard_variable_query(row_repeat,
+                                                  f"SHOW FIELD KEYS FROM {measurment_name}",
+                                                  dataSource)
+    panel_repeat_var = get_dashboard_variable_query(panel_repeat,
+                                                    show_tag_values(measurment_name, panel_repeat),
+                                                    dataSource)
+    query = Query(
+        select_='$kernel',
+        from_=measurment_name,
+        where_=f'"{panel_repeat}" =~ /^${panel_repeat}$/',
+        group_by=['mpi_num_processes', 'omp_max_threads'])
+    options = DashboardOptions(
+        title='Granular Gas Kernel Benchmark',
+        description="Benchmark dashboard for the Granular Gas Benchmark from walberla",
+        tags=[
+            'CPU',
+            'benchmark',
+            'walberla',
+            'Granular Gas'
+        ],
+        timezone="browser",
+    )
+    return build_row_repeat_dashboard(options,
+                                      row_repeat_var,
+                                      panel_repeat_var,
+                                      dataSource,
+                                      measurment_name,
+                                      query,
+                                      unit)
+
+
+def dashboard_phasefieldallencahn():
+    dataSource = 'InfluxDB-1'
+    measurment_name = 'PhaseFieldAllenCahn'
+    row_repeat = "cellsPerBlock_0"
+    panel_repeat = "host"
+    unit = 'MLUPs per Process'
+
+    options = DashboardOptions(
+        title='Phase Field Allen Cahn',
+        description="Benchmark dashboard for the Phasefield Allen Cahn Benchmark from walberla",
+        tags=[
+            'CPU',
+            'benchmark',
+            'walberla',
+            'PhaseField Allen Cahn'
+        ],
+        timezone="browser",
+    )
+    row_repeat_var = get_dashboard_variable_query(row_repeat,
+                                                  show_tag_values(measurment_name, row_repeat),
+                                                  dataSource)
+    panel_repeat_var = get_dashboard_variable_query(panel_repeat,
+                                                    show_tag_values(measurment_name, panel_repeat),
+                                                    dataSource)
+    query = Query(
+        select_='mlupsPerProcess',
+        from_=measurment_name,
+        where_=join_variable_and([row_repeat, panel_repeat]),
+        group_by=[
+            "blocks_0",
+            "blocks_1",
+            "blocks_2",
+            "cellsPerBlock_0",
+            "mpi_num_processes",
+            "host",
+            "executable",
+            "timeStepStrategy",
+            "stencil_phase",
+            "stencil_hydro"
+        ]
+    )
+
+    return build_row_repeat_dashboard(options,
+                                      row_repeat_var,
+                                      panel_repeat_var,
+                                      dataSource,
+                                      measurment_name,
+                                      query,
+                                      unit)
diff --git a/dashboards/deploy.py b/dashboards/deploy.py
new file mode 100644
index 0000000000000000000000000000000000000000..29bee74cbebef4546033077e5bd86ead5ed71e3c
--- /dev/null
+++ b/dashboards/deploy.py
@@ -0,0 +1,12 @@
+import dashboards.dashboard_list as boards
+from dashboards.upload import upload_dashboard
+
+
+def main():
+    walberla_folder = 8  # id of the target Grafana folder (assumed to be the walberla folder)
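+    # Pick up every module-level function whose name starts with "dashboard_"
+    # and upload the dashboard it builds into that folder.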
+    for board in [getattr(boards, func) for func in dir(boards) if func.startswith("dashboard_")]:
+        upload_dashboard(board(), folder=walberla_folder)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/dashboards/influx_queries.py b/dashboards/influx_queries.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4f622abf5628de3d8fdb129812e4ea32330b7f5
--- /dev/null
+++ b/dashboards/influx_queries.py
@@ -0,0 +1,55 @@
+from dataclasses import dataclass, field
+from itertools import repeat
+from typing import List, Union
+
+
+@dataclass
+class Query:
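+    """Parts of an InfluxQL SELECT statement; str() renders the complete query."""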
+    select_: str
+    from_: str
+    where_: str
+    group_by: List[str] = field(default_factory=list)
+
+    def __str__(self):
+
+        ret = f'SELECT "{self.select_}" '
+        ret += f'FROM "{self.from_}" '
+        ret += f'WHERE ({self.where_}) AND $timeFilter '
+        group_by = ', '.join(f'"{tag}"' for tag in self.group_by)
+        ret += f'GROUP BY {group_by}'
+        return ret
+
+
+def show_tag_values(table: str, key_name: str) -> str:
+    """Return influx query to get all tag values from a measurment."""
+    return f"SHOW TAG VALUES FROM \"{table}\" WITH key = \"{key_name}\""
+
+
+def get_variable_condition(variable_name: str) -> str:
+    clean = variable_name.strip()
+    if not clean:
+        raise ValueError("Empty variable name")
+    return f'"{clean}" =~ /^${clean}$/'
+
+
+def join_conditions(conditions: List[str], operators: Union[List[str], str]):
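+    # A single operator (or a one-element list) is placed between every pair of
+    # neighbouring conditions; otherwise exactly one operator per gap is required.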
+    ops = operators
+    if isinstance(operators, str):
+        ops = repeat(operators, len(conditions) - 1)
+    elif len(operators) == 1:
+        ops = repeat(operators[0], len(conditions) - 1)
+    else:
+        if len(conditions) - 1 != len(operators):
+            raise ValueError("unfitting lengths of conditions and operators")
+
+    ret = conditions[0]
+    for op, cond in zip(ops, conditions[1:]):
+        ret += f' {op} {cond}'
+    return ret
+
+
+def join_variable_and(variable_names: List[str]) -> str:
+    return join_conditions(
+        [get_variable_condition(name) for name in variable_names],
+        "AND"
+    )
diff --git a/dashboards/upload.py b/dashboards/upload.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b1dc9973349d4ff9308452a2d131f44509641a8
--- /dev/null
+++ b/dashboards/upload.py
@@ -0,0 +1,67 @@
+from typing import Tuple
+from grafanalib.core import Dashboard
+from grafanalib._gen import DashboardEncoder
+import json
+import requests
+import os
+import dotenv
+
+
+def get_dashboard_json(dashboard, overwrite=False, message="Updated by grafanalib", folder=0):
+    '''
+    get_dashboard_json generates the upload-ready JSON payload from a grafanalib Dashboard object
+
+    :param dashboard - Dashboard() created via grafanalib
+    :param overwrite - whether an existing dashboard with the same title/uid may be overwritten
+    :param message - version message stored alongside the dashboard in Grafana
+    :param folder - id of the Grafana folder the dashboard is placed in
+    '''
+
+    # grafanalib produces the dashboard JSON, which has to be wrapped in a "dashboard" root element
+    return json.dumps(
+        {
+            "dashboard": dashboard.to_json_data(),
+            "overwrite": overwrite,
+            "message": message,
+            "folderId": folder,
+        }, sort_keys=True, indent=2, cls=DashboardEncoder)
+
+
+def upload_to_grafana(dashboard_json, server, api_key, verify=True):
+    '''
+    upload_to_grafana tries to upload the dashboard to grafana and prints the response
+
+    :param dashboard_json - dashboard json generated by grafanalib
+    :param server - grafana server name
+    :param api_key - grafana api key with read and write privileges
+    '''
+
+    headers = {'Authorization': f"Bearer {api_key}", 'Content-Type': 'application/json'}
+    r = requests.post(f"https://{server}/api/dashboards/db", data=dashboard_json, headers=headers, verify=verify)
+    # TODO: add error handling
+    print(f"{r.status_code} - {r.content}")
+
+
+def get_folder_list(server, api_key, verify=True):
+    '''
+    get_folder_list fetches the list of folders from grafana and prints the response
+
+    :param server - grafana server name
+    :param api_key - grafana api key with read and write privileges
+    '''
+
+    headers = {'Authorization': f"Bearer {api_key}", 'Content-Type': 'application/json'}
+    r = requests.get(f"https://{server}/api/folders", headers=headers, verify=verify)
+    # TODO: add error handling
+    print(f"{r.status_code} - {r.content}")
+
+
+def load_config_from_env(env_path: str = ".env") -> Tuple[str, str]:
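+    """Read GRAFANA_SERVER and GRAFANA_API_KEY from a .env file (if present) or the environment."""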
+    if os.path.exists(env_path):
+        dotenv.load_dotenv(env_path)
+    grafana_api_key = os.getenv("GRAFANA_API_KEY")
+    grafana_server = os.getenv("GRAFANA_SERVER")
+    return grafana_server, grafana_api_key
+
+
+def upload_dashboard(dashboard: Dashboard, folder: int) -> None:
+    grafana_server, grafana_api_key = load_config_from_env()
+    dashboard_json = get_dashboard_json(dashboard, overwrite=True, folder=folder)
+    upload_to_grafana(dashboard_json, grafana_server, grafana_api_key)
diff --git a/setup.py b/setup.py
index 4d0a1aa3e2d4a590611b5e419ea4dbcf94bfb241..63401128bf4ab598a5b2ccbd2ac6a094142fa1fd 100644
--- a/setup.py
+++ b/setup.py
@@ -1,16 +1,19 @@
 #!/usr/bin/env python
-from setuptools import setup, find_packages
+from setuptools import find_packages, setup
 
 setup(name="cb-util",
       version="0.1",
       description="Collection of scripts and wrapper of contious benchmarking",
       author="Christoph Alt",
       author_email="Christoph.alt@fau.de",
-      packages=find_packages(include=["cbutil", "cbutil.postprocessing"]),
+      packages=find_packages(include=["cbutil",
+                                      "cbutil.postprocessing",
+                                      "dashboards"]),
       install_requires=[
           "python-dotenv",
           "influxdb",
           "gitpython",
+          "grafanalib",
       ],
       setup_requires=['pytest-runner'],
       tests_require=['pytest']
diff --git a/tests/test_dashboard_creation.py b/tests/test_dashboard_creation.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ac0b2f3e5b34d8039680609791955687962c184
--- /dev/null
+++ b/tests/test_dashboard_creation.py
@@ -0,0 +1,78 @@
+from grafanalib.core import (Dashboard, Repeat, RowPanel, Templating, Time,
+                             TimeSeries)
+
+from dashboards.dashboard_base import (get_dashboard_variable_query,
+                                       get_influx_target)
+from dashboards.dashboard_list import dashboard_uniformGridGPU
+from dashboards.influx_queries import Query, show_tag_values
+
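+# Hand-built reference dashboard that dashboard_uniformGridGPU() is expected to reproduce.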
+dataSource = 'InfluxDB-1'
+measurment_name = 'UniformGridGPU'
+q1 = Query(
+    select_='mlupsPerProcess',
+    from_=measurment_name,
+    where_='"collisionSetup" =~ /^$collisionSetup$/ AND "host" =~ /^$host$/ AND "cellsPerBlock_0" =~ /^$cellsPerBlock_0$/',
+    group_by=[
+            "blocks_0",
+            "blocks_1",
+            "blocks_2",
+            "cellsPerBlock_0",
+            "cellsPerBlock_1",
+            "cellsPerBlock_2",
+            "gpuBlockSize_0",
+            "gpuBlockSize_1",
+            "gpuBlockSize_2",
+            "collisionSetup",
+            "stencil",
+            "streamingPattern"
+    ]
+)
+
+host_var = get_dashboard_variable_query("host",
+                                        show_tag_values(measurment_name, "host"),
+                                        dataSource)
+collisionsetup_var = get_dashboard_variable_query("collisionSetup",
+                                                  show_tag_values(measurment_name, "collisionSetup"),
+                                                  dataSource)
+
+other_filter = 'cellsPerBlock_0'
+cellsPerBlock_var = get_dashboard_variable_query(other_filter,
+                                                 show_tag_values(measurment_name, other_filter),
+                                                 dataSource)
+
+dashboard = Dashboard(
+    title="Uniform Grid GPU",
+    description="Benchmark dashboard for the Uniform Grid GPU Benchmark from walberla",
+    tags=[
+        'GPU',
+        'benchmark',
+        'walberla',
+        'Uniform Grid'
+    ],
+    timezone="browser",
+    rows=[
+        RowPanel(
+            title="collisionSetup: $collisionSetup",
+            collapsed=True,
+            panels=[
+                TimeSeries(
+                    title='host: $host',
+                    dataSource=dataSource,
+                    targets=[
+                        get_influx_target(str(q1))
+                    ],
+                    repeat=Repeat(direction='h', variable=host_var.name),
+                    unit='MLUPs per Process',
+                ),
+            ],
+            repeat=Repeat(direction='v', variable=collisionsetup_var.name),
+        ),
+    ],
+    templating=Templating([host_var, collisionsetup_var, cellsPerBlock_var]),
+    time=Time('now-7d', 'now'),
+
+).auto_panel_ids()
+
+
+def test_build_dashboard():
+    assert dashboard_uniformGridGPU() == dashboard
diff --git a/tests/test_influx_queries.py b/tests/test_influx_queries.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0fb93783a9a0524ef020198c96a5eadcf47dd13
--- /dev/null
+++ b/tests/test_influx_queries.py
@@ -0,0 +1,75 @@
+import pytest
+
+from dashboards.influx_queries import (Query, get_variable_condition,
+                                       join_conditions, show_tag_values)
+
+
+def test_query():
+    q = Query(
+        select_='mlupsPerProcess',
+        from_='UniformGridGPU',
+        where_='"host" =~ /^$host$/ AND "collisionSetup" =~ /^$collisionSetup$/',
+        group_by=[
+            "blocks_0",
+            "blocks_1",
+            "blocks_2",
+            "cellsPerBlock_0",
+            "cellsPerBlock_1",
+            "cellsPerBlock_2",
+            "gpuBlockSize_0",
+            "gpuBlockSize_1",
+            "gpuBlockSize_2",
+            "collisionSetup",
+            "stencil",
+            "streamingPattern"
+        ]
+    )
+
+    q1 = ('SELECT "mlupsPerProcess" '
+          'FROM "UniformGridGPU" '
+          'WHERE ("host" =~ /^$host$/ AND "collisionSetup" =~ /^$collisionSetup$/) AND $timeFilter '
+          'GROUP BY "blocks_0", "blocks_1", "blocks_2", '
+          '"cellsPerBlock_0", "cellsPerBlock_1", "cellsPerBlock_2", '
+          '"gpuBlockSize_0", "gpuBlockSize_1", "gpuBlockSize_2", '
+          '"collisionSetup", "stencil", "streamingPattern"')
+    assert q1 == str(q)
+
+
+def test_show_tag_values():
+    s = show_tag_values("UniformGridGPU", "host")
+    assert s == 'SHOW TAG VALUES FROM \"UniformGridGPU\" WITH key = \"host\"'
+
+
+def test_get_variable_condition():
+    assert get_variable_condition("host") == '"host" =~ /^$host$/'
+    assert get_variable_condition(" host ") == '"host" =~ /^$host$/'
+    with pytest.raises(ValueError):
+        get_variable_condition("")
+    with pytest.raises(ValueError):
+        get_variable_condition("    ")
+    with pytest.raises(ValueError):
+        get_variable_condition("\t    ")
+
+
+def test_join_conditions_two():
+    h = get_variable_condition("host")
+    c = get_variable_condition("collisionSetup")
+    actual = join_conditions([h, c], "AND")
+    expected = '"host" =~ /^$host$/ AND "collisionSetup" =~ /^$collisionSetup$/'
+    assert actual == expected
+
+
+def test_join_conditions_three():
+    conds = [get_variable_condition(name) for name in ["host",
+                                                       "collisionSetup",
+                                                       "cellsPerBlock_0"]]
+    assert join_conditions(conds, "AND") == " AND ".join(conds)
+    assert join_conditions(conds, ["AND", "AND"]) == " AND ".join(conds)
+    assert join_conditions(conds, ["AND"]) == " AND ".join(conds)
+    assert join_conditions(conds, ["OR"]) == " OR ".join(conds)
+
+    expected = ('"host" =~ /^$host$/ '
+                'AND "collisionSetup" =~ /^$collisionSetup$/ '
+                'OR "cellsPerBlock_0" =~ /^$cellsPerBlock_0$/')
+    assert join_conditions(conds, ["AND", "OR"]) == expected