Module: check_mk
Branch: master
Commit: 85e4ba64f19e6f061a9447214048a2406331d1ae
URL:
http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=85e4ba64f19e6f…
Author: Óscar Nájera <on@mathias-kettner.de>
Date: Thu Feb 21 18:07:21 2019 +0100
Perf data translation and normalization
CMK-1722
Change-Id: Iebf7a83cd27964d65e955570f5958c686c28df7a
---
cmk/gui/plugins/metrics/utils.py | 107 ++++++++++++-----------
tests/unit/cmk/gui/plugins/test_metrics_utils.py | 92 ++++++++++++++-----
2 files changed, 126 insertions(+), 73 deletions(-)
diff --git a/cmk/gui/plugins/metrics/utils.py b/cmk/gui/plugins/metrics/utils.py
index eab13ad..5c55289 100644
--- a/cmk/gui/plugins/metrics/utils.py
+++ b/cmk/gui/plugins/metrics/utils.py
@@ -210,10 +210,10 @@ def _split_perf_data(perf_data_string):
return [s.decode('utf-8') for s in
shlex.split(perf_data_string.encode('utf-8'))]
-def perfvar_translation(perfvar_nr, perfvar_name, check_command):
+def perfvar_translation(perfvar_name, check_command):
"""Get translation info for one performance var."""
cm = check_metrics.get(check_command, {})
- translation_entry = {} # Default: no translation neccessary
+ translation_entry = {} # Default: no translation necessary
if perfvar_name in cm:
translation_entry = cm[perfvar_name]
@@ -224,70 +224,72 @@ def perfvar_translation(perfvar_nr, perfvar_name, check_command):
translation_entry = te
break
- metric_name = translation_entry.get("name", perfvar_name)
- scale = translation_entry.get("scale", 1.0)
-
return {
- "name": metric_name,
- "scale": scale,
- "auto_graph": translation_entry.get("auto_graph", True)
+ "name": translation_entry.get("name", perfvar_name),
+ "scale": translation_entry.get("scale", 1.0),
+ "auto_graph": translation_entry.get("auto_graph", True),
}
+def normalize_perf_data(perf_data, check_command):
+ translation_entry = perfvar_translation(perf_data[0], check_command)
+
+ new_entry = {
+ "orig_name": perf_data[0],
+ "value": perf_data[1] * translation_entry["scale"],
+ "scalar": {},
+        "scale": translation_entry["scale"],  # needed for graph recipes
+ # Do not create graphs for ungraphed metrics if listed here
+ "auto_graph": translation_entry["auto_graph"],
+ }
+
+ # Add warn, crit, min, max
+ for perf_value, name in zip(perf_data[3:], ["warn", "crit",
"min", "max"]):
+ if perf_value is not None:
+ try:
+                new_entry["scalar"][name] = perf_value * translation_entry["scale"]
+ except Exception as exc:
+ if config.debug:
+ raise exc
+ return translation_entry["name"], new_entry
+
+
+def get_metric_info(metric_name, color_index):
+
+ if metric_name not in metric_info:
+ color_index += 1
+ palette_color = get_palette_color_by_index(color_index)
+ mi = {
+ "title": metric_name.title(),
+ "unit": "",
+ "color": parse_color_into_hexrgb(palette_color),
+ }
+ else:
+ mi = metric_info[metric_name].copy()
+ mi["color"] = parse_color_into_hexrgb(mi["color"])
+
+ return mi, color_index
+
+
def translate_metrics(perf_data, check_command):
"""Convert Ascii-based performance data as output from a check plugin
- into floating point numbers, do scaling if neccessary.
+ into floating point numbers, do scaling if necessary.
Simple example for perf_data: [(u'temp', u'48.1', u'',
u'70', u'80', u'', u'')]
Result for this example:
- { "temp" : "value" : 48.1, "warn" : 70,
"crit" : 80, "unit" : { ... } }
+ { "temp" : {"value" : 48.1, "scalar": {"warn"
: 70, "crit" : 80}, "unit" : { ... } }}
"""
translated_metrics = {}
color_index = 0
- for nr, entry in enumerate(perf_data):
- varname = entry[0]
- value = entry[1]
-
- translation_entry = perfvar_translation(nr, varname, check_command)
- metric_name = translation_entry["name"]
+ for entry in perf_data:
+ metric_name, new_entry = normalize_perf_data(entry, check_command)
if metric_name in translated_metrics:
continue # ignore duplicate value
- if metric_name not in metric_info:
- color_index += 1
- palette_color = get_palette_color_by_index(color_index)
- mi = {
- "title": metric_name.title(),
- "unit": "",
- "color": parse_color_into_hexrgb(palette_color),
- }
- else:
- mi = metric_info[metric_name].copy()
- mi["color"] = parse_color_into_hexrgb(mi["color"])
-
- new_entry = {
- "value": value * translation_entry["scale"],
- "orig_name": varname,
-            "scale": translation_entry["scale"],  # needed for graph recipes
- "scalar": {},
- }
-
- # Do not create graphs for ungraphed metrics if listed here
- new_entry["auto_graph"] = translation_entry["auto_graph"]
-
- # Add warn, crit, min, max
-        for index, key in [(3, "warn"), (4, "crit"), (5, "min"), (6, "max")]:
- if len(entry) < index + 1:
- break
- elif entry[index] is not None:
- try:
-                    new_entry["scalar"][key] = entry[index] * translation_entry["scale"]
- except:
- if config.debug:
- raise
-
+ mi, color_index = get_metric_info(metric_name, color_index)
new_entry.update(mi)
+
new_entry["unit"] = unit_info[new_entry["unit"]]
translated_metrics[metric_name] = new_entry
@@ -764,7 +766,8 @@ def _get_hue_by_weight_index(weight_index):
def parse_color_into_hexrgb(color_string):
if color_string[0] == "#":
return color_string
- elif "/" in color_string:
+
+ if "/" in color_string:
cmk_color_index, color_shading = color_string.split("/")
hsv = list(_cmk_color_palette[cmk_color_index])
@@ -783,8 +786,8 @@ def parse_color_into_hexrgb(color_string):
color_hexrgb = hsv_to_hexrgb(hsv)
return color_hexrgb
- else:
- return "#808080"
+
+ return "#808080"
def hsv_to_hexrgb(hsv):
diff --git a/tests/unit/cmk/gui/plugins/test_metrics_utils.py
b/tests/unit/cmk/gui/plugins/test_metrics_utils.py
index 4fe7ac1..0041fae 100644
--- a/tests/unit/cmk/gui/plugins/test_metrics_utils.py
+++ b/tests/unit/cmk/gui/plugins/test_metrics_utils.py
@@ -4,31 +4,32 @@ import cmk.gui.config
from cmk.gui.plugins.metrics import utils
-@pytest.mark.parametrize(
- "data_string, result",
- [
- ("he lo", ["he", "lo"]),
- ("'há li'", ["há li"]),
- (u"hé ßß", [u"hé", u"ßß"]),
- ],
-)
+@pytest.mark.parametrize("data_string, result", [
+ ("he lo", ["he", "lo"]),
+ ("'há li'", ["há li"]),
+ (u"hé ßß", [u"hé", u"ßß"]),
+])
def test_split_perf_data(data_string, result):
assert utils._split_perf_data(data_string) == result
@pytest.mark.parametrize("perf_str, check_command, result", [
- ('', None, ([], None)),
- ('hi ho', None, ([], None)),
- ('hi=6 [ihe]', 'ter', ([('hi', 6, '', None, None,
None, None)], 'ihe')),
- (u'hi=l6 [ihe]', 'ter', ([], 'ihe')),
- (u'hi=6 [ihe]', 'ter', ([('hi', 6, '', None, None,
None, None)], 'ihe')),
- ('hi=5 no=6', 'test', ([('hi', 5, u'', None, None,
None, None),
- ('no', 6, u'', None, None, None, None)],
'test')),
- ('hi=5;6;7;8;9 \'not here\'=6;5.6;;;', 'test',
- ([('hi', 5, u'', 6, 7, 8, 9), ('not_here', 6, u'',
5.6, None, None, None)], 'test')),
- ('hi=5G;;;; \'not here\'=6M;5.6;;;', 'test',
- ([('hi', 5, u'G', None, None, None, None),
- ('not_here', 6, u'M', 5.6, None, None, None)], 'test')),
+ ("", None, ([], None)),
+ ("hi=6 [ihe]", "ter", ([("hi", 6, "", None,
None, None, None)], "ihe")),
+ (u"hi=l6 [ihe]", "ter", ([], "ihe")),
+ (u"hi=6 [ihe]", "ter", ([("hi", 6, "", None,
None, None, None)], "ihe")),
+ ("hi=5 no=6", "test", ([
+ ("hi", 5, u"", None, None, None, None),
+ ("no", 6, u"", None, None, None, None),
+ ], "test")),
+ ("hi=5;6;7;8;9 'not here'=6;5.6;;;", "test", ([
+ ("hi", 5, u"", 6, 7, 8, 9),
+ ("not_here", 6, u"", 5.6, None, None, None),
+ ], "test")),
+ ("hi=5G;;;; 'not here'=6M;5.6;;;", "test", ([
+ ("hi", 5, u"G", None, None, None, None),
+ ("not_here", 6, u"M", 5.6, None, None, None),
+ ], "test")),
])
def test_parse_perf_data(perf_str, check_command, result):
assert utils.parse_perf_data(perf_str, check_command) == result
@@ -37,4 +38,53 @@ def test_parse_perf_data(perf_str, check_command, result):
def test_parse_perf_data2(monkeypatch):
with pytest.raises(ValueError):
monkeypatch.setattr(cmk.gui.config, "debug", True)
- utils.parse_perf_data('hi ho', None)
+ utils.parse_perf_data("hi ho", None)
+
+
+@pytest.mark.parametrize("perf_name, check_command, result", [
+ ("in", "check_mk-lnx_if", {
+ "scale": 8,
+ "name": "if_in_bps",
+ "auto_graph": True
+ }),
+ ("memused", "check_mk-hr_mem", {
+ "auto_graph": False,
+ "name": "total_used",
+ "scale": 1024**2
+ }),
+ ("fake", "check_mk-imaginary", {
+ "auto_graph": True,
+ "name": "fake",
+ "scale": 1.0
+ }),
+])
+def test_perfvar_translation(perf_name, check_command, result):
+ assert utils.perfvar_translation(perf_name, check_command) == result
+
+
+@pytest.mark.parametrize("perf_data, check_command, result", [
+ (("in", 496876.200933, "", None, None, 0, 125000000),
'check_mk-lnx_if', ('if_in_bps', {
+ "orig_name": "in",
+ "value": 3975009.607464,
+ "scalar": {
+ "max": 1000000000,
+ "min": 0
+ },
+ "scale": 8,
+ "auto_graph": True,
+ })),
+ (("fast", 5, "", 4, 9, 0, 10), 'check_mk-imaginary',
('fast', {
+ "orig_name": "fast",
+ "value": 5.0,
+ "scalar": {
+ "warn": 4.0,
+ "crit": 9.0,
+ "min": 0.0,
+ "max": 10.0
+ },
+ "scale": 1.0,
+ "auto_graph": True,
+ })),
+])
+def test_normalize_perf_data(perf_data, check_command, result):
+ assert utils.normalize_perf_data(perf_data, check_command) == result