Module: check_mk
Branch: master
Commit: d884fe0b5c2c1f78c1e3b52c732db2980c78b9fc
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=d884fe0b5c2c1f…
Author: Andreas Boesl <ab(a)mathias-kettner.de>
Date: Thu Jan 17 15:27:48 2013 +0100
Fix: Distributed monitoring setup:
- site configuration changes no longer mark all sites as dirty
- correct logfile cleanup on 'Activate changes'
---
ChangeLog | 2 ++
web/htdocs/wato.py | 18 ++++++++++++++----
2 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 7d8a0c5..f1b8e73 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -20,6 +20,8 @@
* Storing row selections in user files, cleaned up row selection
handling to single files. Cleaned up GET/POST mixups in confirm dialogs
* Add: New user_options to limit seen nagios objects even the role is set to see all
+ * Fix: On site configuration changes, only relevant sites are marked as dirty
+ * Fix: Distributed setup: Correct cleanup of pending changes logfile after "Activate changes"
1.2.1i4:
Core:
diff --git a/web/htdocs/wato.py b/web/htdocs/wato.py
index cabef4c..ba6f916 100644
--- a/web/htdocs/wato.py
+++ b/web/htdocs/wato.py
@@ -6653,16 +6653,22 @@ def mode_edit_site(phase):
if not new and "secret" in old_site:
new_site["secret"] = old_site["secret"]
+
save_sites(sites)
+
+ # Own site needs SYNCRESTART in any case
+ update_replication_status(our_site_id(), { "need_restart" : True })
+
if new:
- log_pending(SYNCRESTART, None, "edit-sites", _("Created new connection to site %s" % id))
+ update_replication_status(id, { "need_sync" : True, "need_restart" : True })
+ log_pending(AFFECTED, None, "edit-sites", _("Created new connection to site %s" % id))
else:
- log_pending(SYNCRESTART, None, "edit-sites", _("Modified site connection %s" % id))
+ log_pending(AFFECTED, None, "edit-sites", _("Modified site connection %s" % id))
# Replication mode has switched on/off => handle replication state
repstatus = load_replication_status()
- if repl and id not in repstatus: # Repl switched on
+ if repl: # Repl is on
update_replication_status(id, { "need_sync" : True, "need_restart" : True })
- elif (not repl) and id in repstatus:
+ elif id in repstatus: # Repl switched off
update_replication_status(id, None) # Replication switched off
if is_distributed() and global_replication_state() == "clean":
log_commit_pending()
@@ -7281,6 +7287,10 @@ def ajax_replication():
result = str(e)
if result == True:
answer = "OK:" + _("Success");
+ # Make sure that the pending changes are clean as soon as the
+ # last site has successfully been updated.
+ if is_distributed() and global_replication_state() == "clean":
+ log_commit_pending()
else:
answer = "<div class=error>%s: %s</div>" % (_("Error"), hilite_errors(result))
Module: check_mk
Branch: master
Commit: e842afe6103a8d4140d3ffe875ff1a2fa2b3664d
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=e842afe6103a8d…
Author: Lars Michelsen <lm(a)mathias-kettner.de>
Date: Thu Jan 17 14:55:25 2013 +0100
FIX: PING services on clusters are treated like the host check of clusters
---
.bugs/786 | 7 +++++--
ChangeLog | 1 +
modules/check_mk.py | 9 +++++++--
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/.bugs/786 b/.bugs/786
index 7f09683..9ddd303 100644
--- a/.bugs/786
+++ b/.bugs/786
@@ -1,9 +1,9 @@
Title: PING check for clusters without services UNKNOWN
Component: core
-State: open
+Class: nastiness
+State: done
Date: 2012-07-26 16:29:56
Targetversion: 1.2.2
-Class: nastiness
If you have a cluster without any services, then Check_MK will
create a PING service. This will be UNKNOWN. Couldn't we do
@@ -16,3 +16,6 @@ should behave just like the host check:
* Use the IP Adress of the cluster if available
* PING all nodes with check_icmp otherwise
+
+2013-01-17 14:54:46: changed state open -> done
+Using existing check-mk-ping-cluster command now for PING services on clusters.
diff --git a/ChangeLog b/ChangeLog
index f009631..86173bb 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -2,6 +2,7 @@
Core:
* Improved handling of CTRL+C (SIGINT) to terminate long running tasks
(e.g. inventory of SNMP hosts)
+ * FIX: PING services on clusters are treated like the host check of clusters
Checks & Agents:
* Linux Agent, diskstat: Now supporting /dev/emcpower* devices (Thanks to Claas Rockmann-Buchterkirche)
diff --git a/modules/check_mk.py b/modules/check_mk.py
index ad39048..bfe2d7b 100755
--- a/modules/check_mk.py
+++ b/modules/check_mk.py
@@ -1931,17 +1931,22 @@ define service {
""" % (template, hostname, description, simulate_command(command),
command_line and 1 or 0, extraconf))
+ # Levels for host check
+ if is_cluster(hostname):
+ ping_command = 'check-mk-ping-cluster'
+ else:
+ ping_command = 'check-mk-ping'
# No check_mk service, no legacy service -> create PING service
if not have_at_least_one_service and not legchecks and not actchecks:
outfile.write("""
define service {
use\t\t\t\t%s
- check_command\t\t\tcheck-mk-ping!%s
+ check_command\t\t\t%s!%s
%s host_name\t\t\t%s
}
-""" % (pingonly_template, check_icmp_arguments(hostname), extra_service_conf_of(hostname, "PING"), hostname))
+""" % (pingonly_template, ping_command, check_icmp_arguments(hostname), extra_service_conf_of(hostname, "PING"), hostname))
def simulate_command(command):
if simulation_mode:
Module: check_mk
Branch: master
Commit: dd604222bb9e1ac1429935b6a7645a4276e14536
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=dd604222bb9e1a…
Author: Lars Michelsen <lm(a)mathias-kettner.de>
Date: Thu Jan 17 15:19:50 2013 +0100
Added FOREACH_SERVICE capability to leaf nodes
---
.bugs/785 | 16 ++++++++++++++--
ChangeLog | 3 +++
web/htdocs/bi.py | 6 +++++-
3 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/.bugs/785 b/.bugs/785
index 07683d9..8f28356 100644
--- a/.bugs/785
+++ b/.bugs/785
@@ -1,9 +1,9 @@
Title: FOREACH_SERVICE in a leaf node does not work for direct host/services
Component: bi
-State: open
+Class: bug
+State: done
Date: 2012-07-26 16:05:16
Targetversion: 1.2.2
-Class: bug
If you use FOREACH_SERVICE within an inner node then you cannot
directly add leaf nodes. The following produces an error:
@@ -14,3 +14,15 @@ aggregation_rules[...] = (
( FOREACH_SERVICE, ALL_HOSTS, ".*", "$1$", HOST_STATE ),
]
)
+
+2013-01-17 15:19:06: changed state open -> done
+1. added FOREACH_SERVICE for leaf nodes
+2. the following works for me now:
+
+(FOREACH_SERVICE, "localhost", "fs_(.*)", "filesystem", [ "localhost", "$1$" ] ),
+(FOREACH_SERVICE, "localhost", "fs_(.*)", "localhost", "fs_$1$"),
+(FOREACH_SERVICE, "localhost", "fs_(.*)", "localhost", "fs_$1$" ),
+(FOREACH_SERVICE, "localhost", "fs_(.*)", "localhost", HOST_STATE ),
+(FOREACH_SERVICE, "(.*)", "fs_(.*)", "$1$", HOST_STATE ),
+
+Some of the rules are senseless but should work in general.
diff --git a/ChangeLog b/ChangeLog
index 86173bb..dc1685a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -40,6 +40,9 @@
* FIX: Fixed displaying of tag selections when creating a rule in the ruleeditor
* FIX: Rulesets are always cloned in the same folder
+ BI:
+ * Added FOREACH_SERVICE capability to leaf nodes
+
1.2.1i4:
Core:
* Better exception handling when executing "Check_MK"-Check. Printing python
diff --git a/web/htdocs/bi.py b/web/htdocs/bi.py
index c8b5c16..ad13490 100644
--- a/web/htdocs/bi.py
+++ b/web/htdocs/bi.py
@@ -736,7 +736,11 @@ def compile_aggregation_rule(aggr_type, rule, args, lvl):
new_elements = new_new_elements
elif type(node[-1]) != list:
- if node[0] in [ config.FOREACH_HOST, config.FOREACH_CHILD, config.FOREACH_PARENT ]:
+ if node[0] in [
+ config.FOREACH_HOST,
+ config.FOREACH_CHILD,
+ config.FOREACH_PARENT,
+ config.FOREACH_SERVICE ]:
# Handle case that leaf elements also need to be iterable via FOREACH_HOST
# 1: config.FOREACH_HOST
# 2: (['waage'], '(.*)')