Module: check_mk
Branch: master
Commit: 79e76580e38dd3336348c32507d901c5edf4ee11
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=79e76580e38dd3…
Author: Simon Betz <si(a)mathias-kettner.de>
Date: Fri Aug 26 11:59:33 2016 +0200
minor cleanup
---
web/plugins/views/availability.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/web/plugins/views/availability.py b/web/plugins/views/availability.py
index e16419f..356216f 100644
--- a/web/plugins/views/availability.py
+++ b/web/plugins/views/availability.py
@@ -82,7 +82,6 @@ def render_availability_options(what):
is_open = True
if html.var("_unset_logrow_limit") == "1":
- html.set_var("avo_logrow_limit", 0)
avoptions["logrow_limit"] = 0
range_vs = None
@@ -575,10 +574,10 @@ def render_bi_availability(title, aggr_rows):
htmlcode + \
'</td></tr></table>'
- # Note: '__has_reached_logrow_limit' is used for all other
- # availability views but not for BI. There we have to take
+ # Note: 'spans_by_object' returns two values which are used by
+ # all availability views but not by BI. There we have to take
# only complete aggregations
- av_rawdata, __has_reached_logrow_limit = availability.spans_by_object(spans, None)
+ av_rawdata = availability.spans_by_object(spans, None)[0]
av_data = availability.compute_availability("bi", av_rawdata, avoptions)
# If we abolish the limit we have to fetch the data again
Module: check_mk
Branch: master
Commit: 94c9cc7980c6e34c6fcb99290e3dcaa6e2f57df2
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=94c9cc7980c6e3…
Author: Simon Betz <si(a)mathias-kettner.de>
Date: Fri Aug 26 09:48:49 2016 +0200
3781 FIX ipmi: made check more robust against incomplete datasets
---
.werks/3781 | 10 ++++++++++
ChangeLog | 1 +
checks/ipmi | 52 ++++++++++++++++++++++++++++++++++++++++------------
3 files changed, 51 insertions(+), 12 deletions(-)
diff --git a/.werks/3781 b/.werks/3781
new file mode 100644
index 0000000..02a3f9d
--- /dev/null
+++ b/.werks/3781
@@ -0,0 +1,10 @@
+Title: ipmi: made check more robust against incomplete datasets
+Level: 1
+Component: checks
+Class: fix
+Compatible: compat
+State: unknown
+Version: 1.4.0i1
+Date: 1472197703
+
+
diff --git a/ChangeLog b/ChangeLog
index faa9b37..274b00f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -356,6 +356,7 @@
* 3766 FIX: palo_alto_sessions: Fixed possible division by zero error
* 3769 FIX: cisco_temperature: Fixed exception in case of empty SNMP data received from device
* 3770 FIX: win_dhcp_pools: Fixed crash in case of empty DHCP pools
+ * 3781 FIX: ipmi: made check more robust against incomplete datasets
Multisite:
* 3187 notification view: new filter for log command via regex
diff --git a/checks/ipmi b/checks/ipmi
index c660fcd..5cc1381 100644
--- a/checks/ipmi
+++ b/checks/ipmi
@@ -112,6 +112,32 @@
# PS1 Status | C8h | ok | 10.1 | Presence detected, Failure detected <= NOT OK !!
# PS2 Status | C9h | ok | 10.2 | Presence detected
+# broken
+# <<<ipmi:cached(1472175405,300)>>>
+# 01-Inlet_Ambient 18.000 degrees_C ok na na na na 42.000 46.000
+# 02-CPU_1 40.000 degrees_C ok na na na na 70.000 na
+# 03-CPU_2 40.000 degrees_C ok na na na na 70.000 na
+# 04-DIMM_P1_1-3 32.000 degrees_C ok na na na na 87.000 na
+# 05-DIMM_P1_4-6 32.000 degrees_C ok na na na na 87.000 na
+# 06-DIMM_P2_1-3 27.000 degrees_C ok na na na na 87.000 na
+# 07-DIMM_P2_4-6 26.000 degrees_C ok na na na na 87.000 na
+# 09-Chipset 47.000 degrees_C ok na na na na 105.000 na
+# 10-VR_P1 32.000 degrees_C ok na na na na 115.000 120.000
+# 11-VR_P2 27.000 degrees_C ok na na na na 115.000 120.000
+# 12-VR_P1_Zone 27.000 degrees_C ok na na na na 80.000 85.000
+# 13-VR_P2_Zone 25.000 degrees_C ok na na na na 80.000 85.000
+# 14-VR_P1_Mem 33.000 degrees_C ok na na na na 115.000 120.000
+# 15-VR_P2_Mem 23.000 degrees_C ok na na na na 115.000 120.000
+# 16-VR_P1Mem_Zone 32.000 degrees_C ok na na na na 80.000 85.000
+# 17-VR_P2Mem_Zone 22.000 degrees_C ok na na na na 80.000 85.000
+# 18-HD_Controller 57.000 degrees_C ok na na na na 90.000 na
+# 19-Supercap 32.000 degrees_C ok na na na na 65.000 na
+# 21-PCI_Zone 30.000 degrees_C ok na na na na 80.000 85.000
+# 23-I/O_1_Zone 28.000 degrees_C ok na na na na 80.000 85.000
+# 26-I/O_LOM 40.000 degrees_C ok na na na na 100.000 na
+# 27-Sys_Exhaust 31.000 degrees_C ok na na na na 80.000 85.000
+# PS3_Inpu
+
ipmi_summarize = True
ipmi_ignore_nr = False # set to True in order to ignore entries with state 'nr'
@@ -146,18 +172,20 @@ def parse_ipmi(info):
for section in [ ipmi_info, ipmi_discrete_info ]:
if section:
for line in section:
- # Compatible with older check versions
- name = line[0].strip().replace(" ", "_")
- line = [ name ] + map(lambda x: x.strip(), line[1:])
-
- # Discrete sensors have no values
- if len(line) <= 5:
- state = line[2]
- if line[4]:
- state += " (%s)" % line[4]
- line = [line[0], None, None, state, None, None, None, None, None, None]
-
- parsed.append(line)
+ if len(line) >= 2:
+ # Compatible with older check versions
+ name = line[0].strip().replace(" ", "_")
+ line = [ name ] + map(lambda x: x.strip(), line[1:])
+
+ # Discrete sensors have no values
+ if len(line) == 5:
+ state = line[2]
+ if line[4]:
+ state += " (%s)" % line[4]
+ line = [line[0], None, None, state, None, None, None, None, None, None]
+
+ if len(line) == 10:
+ parsed.append(line)
return parsed
Module: check_mk
Branch: master
Commit: 9ca1c32b066b5ee7f38cf9ef97a783f4c186a436
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=9ca1c32b066b5e…
Author: Simon Betz <si(a)mathias-kettner.de>
Date: Fri Aug 26 11:23:58 2016 +0200
added log row limit for BI
---
web/htdocs/availability.py | 19 ++++++++++++++-----
web/plugins/views/availability.py | 36 +++++++++++++++++++++++++++++++-----
2 files changed, 45 insertions(+), 10 deletions(-)
diff --git a/web/htdocs/availability.py b/web/htdocs/availability.py
index 688c380..deb216a 100644
--- a/web/htdocs/availability.py
+++ b/web/htdocs/availability.py
@@ -1536,13 +1536,16 @@ def get_bi_availability_rawdata(filterheaders, only_sites, av_object, include_ou
raise Exception("Not implemented yet. Sorry.")
-def get_bi_spans(tree, aggr_group, avoptions, timewarp):
+def get_bi_spans(tree, aggr_group, avoptions, countdown_logrow_limit, timewarp):
time_range, range_title = avoptions["range"]
- phases_list = get_bi_leaf_history(tree, aggr_group, time_range)
- return compute_bi_timeline(tree, aggr_group, time_range, timewarp, phases_list)
+ phases_list, countdown_logrow_limit = \
+ get_bi_leaf_history(tree, aggr_group, time_range, countdown_logrow_limit)
-def get_bi_leaf_history(tree, aggr_group, time_range):
+ return compute_bi_timeline(tree, aggr_group, time_range, timewarp, phases_list), countdown_logrow_limit
+
+
+def get_bi_leaf_history(tree, aggr_group, time_range, countdown_logrow_limit):
# Get state history of all hosts and services contained in the tree.
# In order to simplify the query, we always fetch the information for
# all hosts of the aggregates.
@@ -1586,6 +1589,12 @@ def get_bi_leaf_history(tree, aggr_group, time_range):
columns = ["site"] + columns
rows = [ dict(zip(columns, row)) for row in data ]
+ # Now we find out whether the log row limit was exceeded,
+ # or whether the log's length merely equals the limit by coincidence.
+ # If this limit was exceeded then we cut off the last element
+ # in spans_by_object because it might be incomplete.
+ countdown_logrow_limit = countdown_logrow_limit - len(data)
+
# Reclassify base data due to annotations
rows = reclassify_bi_rows(rows)
@@ -1608,7 +1617,7 @@ def get_bi_leaf_history(tree, aggr_group, time_range):
for from_time in sorted_times:
phases_list.append((from_time, phases[from_time]))
- return phases_list
+ return phases_list, countdown_logrow_limit
def compute_bi_timeline(tree, aggr_group, time_range, timewarp, phases_list):
diff --git a/web/plugins/views/availability.py b/web/plugins/views/availability.py
index a76129d..e16419f 100644
--- a/web/plugins/views/availability.py
+++ b/web/plugins/views/availability.py
@@ -81,6 +81,10 @@ def render_availability_options(what):
html.add_user_error(e.varname, e)
is_open = True
+ if html.var("_unset_logrow_limit") == "1":
+ html.set_var("avo_logrow_limit", 0)
+ avoptions["logrow_limit"] = 0
+
range_vs = None
for name, height, show_in_reporting, vs in avoption_entries:
if name == 'rangespec':
@@ -245,7 +249,7 @@ def render_availability_page(view, datasource, filterheaders, only_sites, limit)
"<b>Note:</b> The number of shown rows does not necessarily reflect the "
"matched entries and the result might be incomplete. ") % avoptions["logrow_limit"]
text += '<a href="%s">%s</a>' % \
- (html.makeuri([("avo_logrow_limit", 0)]), _('Repeat query without limit.'))
+ (html.makeuri([("_unset_logrow_limit", "1")]), _('Repeat query without limit.'))
html.show_warning(text)
do_render_availability(what, av_rawdata, av_data, av_mode, av_object, avoptions)
@@ -504,6 +508,8 @@ def render_bi_availability(title, aggr_rows):
timewarpcode = ""
if not html.has_user_errors():
+ countdown_logrow_limit = avoptions["logrow_limit"]
+ has_reached_logrow_limit = False
spans = []
for aggr_row in aggr_rows:
tree = aggr_row["aggr_tree"]
@@ -512,7 +518,16 @@ def render_bi_availability(title, aggr_rows):
timewarp = int(html.var("timewarp"))
except:
timewarp = None
- these_spans, timewarp_tree_state = availability.get_bi_spans(tree, aggr_row["aggr_group"], avoptions, timewarp)
+
+ (these_spans, timewarp_tree_state), countdown_logrow_limit = \
+ availability.get_bi_spans(tree, aggr_row["aggr_group"], avoptions, countdown_logrow_limit, timewarp)
+
+ # We take only complete aggregations, i.e. if we have
+ # undershot the log row limit then we ignore the rest
+ if avoptions["logrow_limit"] and countdown_logrow_limit < 0:
+ has_reached_logrow_limit = True
+ break
+
spans += these_spans
if timewarp and timewarp_tree_state:
state, assumed_state, node, subtrees = timewarp_tree_state
@@ -560,11 +575,22 @@ def render_bi_availability(title, aggr_rows):
htmlcode + \
'</td></tr></table>'
- # FIXME non implemented log row limit
- av_rawdata, has_reached_logrow_limit = availability.spans_by_object(spans, None)
-
+ # Note: '__has_reached_logrow_limit' is used for all other
+ # availability views but not for BI. There we have to take
+ # only complete aggregations
+ av_rawdata, __has_reached_logrow_limit = availability.spans_by_object(spans, None)
av_data = availability.compute_availability("bi", av_rawdata, avoptions)
+ # If we abolish the limit we have to fetch the data again
+ # with changed logrow_limit = 0, which means no limit
+ if has_reached_logrow_limit:
+ text = _("Your query matched more than %d log entries. "
+ "<b>Note:</b> The number of shown rows does not necessarily reflect the "
+ "matched entries and the result might be incomplete. ") % avoptions["logrow_limit"]
+ text += '<a href="%s">%s</a>' % \
+ (html.makeuri([("_unset_logrow_limit", "1")]), _('Repeat query without limit.'))
+ html.show_warning(text)
+
if html.output_format == "csv_export":
output_availability_csv("bi", av_data, avoptions)
return
Module: check_mk
Branch: master
Commit: 25cbc6890e31cbf84f884d210f0b628e70631801
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=25cbc6890e31cb…
Author: Lars Michelsen <lm(a)mathias-kettner.de>
Date: Thu Aug 25 15:06:48 2016 +0200
3771 FIX Fixed exception on edit rule page in case of special URLs
---
.werks/3771 | 10 ++++++++++
ChangeLog | 1 +
web/plugins/wato/mkeventd.py | 11 +++++++++--
3 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/.werks/3771 b/.werks/3771
new file mode 100644
index 0000000..ba2b380
--- /dev/null
+++ b/.werks/3771
@@ -0,0 +1,10 @@
+Title: Fixed exception on edit rule page in case of special URLs
+Level: 1
+Component: ec
+Class: fix
+Compatible: compat
+State: unknown
+Version: 1.4.0i1
+Date: 1472130399
+
+
diff --git a/ChangeLog b/ChangeLog
index f807724..faa9b37 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -632,6 +632,7 @@
* 3566 FIX: Fixed exception on SNMP MIB management page when MIB directory is missing
* 3568 FIX: Fixed broken link in "Access to event status via TCP" help text
* 3589 FIX: Fixed processing of RFC 5424 syslog messages
+ * 3771 FIX: Fixed exception on edit rule page in case of special URLs
Livestatus:
* 3644 Fixed 'source' column in 'comments' table.
diff --git a/web/plugins/wato/mkeventd.py b/web/plugins/wato/mkeventd.py
index e9d2093..7a5cf39 100644
--- a/web/plugins/wato/mkeventd.py
+++ b/web/plugins/wato/mkeventd.py
@@ -1463,10 +1463,14 @@ def mode_mkeventd_edit_rule(phase):
if html.has_var("rule_pack"):
rule_pack_nr, rule_pack = rule_pack_with_id(rule_packs, html.var("rule_pack"))
- # In links from multisite views the rule pack is not known.
- # We just know the rule id and need to find the pack ourselves.
else:
+ # In links from multisite views the rule pack is not known.
+ # We just know the rule id and need to find the pack ourselves.
rule_id = html.var("rule_id")
+ if rule_id == None:
+ raise MKUserError("rule_id", _("The rule you are trying to edit does not exist."))
+
+ rule_pack = None
for nr, pack in enumerate(rule_packs):
for rnr, rule in enumerate(pack["rules"]):
if rule_id == rule["id"]:
@@ -1476,6 +1480,9 @@ def mode_mkeventd_edit_rule(phase):
html.set_var("rule_pack", pack["id"])
break
+ if not rule_pack:
+ raise MKUserError("rule_id", _("The rule you are trying to edit does not exist."))
+
rules = rule_pack["rules"]