logwatch rules were ineffective
Message-ID: <54084bc2.9K9DvTWW336FJ0wm%mk(a)mathias-kettner.de>
User-Agent: Heirloom mailx 12.4 7/29/08
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Transfer-Encoding: 7bit
Module: check_mk
Branch: master
Commit: 1f51d62d603ef99f8c5d2de869c73239b90e54d4
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=1f51d62d603ef9…
Author: Mathias Kettner <mk(a)mathias-kettner.de>
Date: Thu Sep 4 13:23:41 2014 +0200
logwatch: fixed bug from yesterday, logwatch rules were ineffective
---
checks/fileinfo | 2 +-
checks/logwatch | 18 +++++++-----------
web/htdocs/wato.py | 15 +++++++++++----
3 files changed, 19 insertions(+), 16 deletions(-)
diff --git a/checks/fileinfo b/checks/fileinfo
index 0599650..3919f76 100644
--- a/checks/fileinfo
+++ b/checks/fileinfo
@@ -114,7 +114,7 @@ def fileinfo_groups_precompile(hostname, item, params):
for group_name, pattern in line:
if group_name == item:
if type(pattern) == str: # support old format
- pattern = ( pattern, '' )
+ pattern = (pattern, '')
patterns.append(pattern)
params['precompiled_patterns'] = patterns
return params
diff --git a/checks/logwatch b/checks/logwatch
index 549ea5a..2f4ebe9 100644
--- a/checks/logwatch
+++ b/checks/logwatch
@@ -83,11 +83,11 @@ def inventory_logwatch(info):
if groups:
continue
else:
- inventory.append((logfile, []))
+ inventory.append((logfile, None))
return inventory
-#logwatch_patterns = {
+# logwatch_patterns = {
# 'System': [
# ( 'W', 'sshd' ),
# ( ['host1', 'host2'], 'C', 'ssh' ), # only applies to certain hosts
@@ -256,8 +256,8 @@ precompile_params['logwatch'] = logwatch_precompile
# | |
# '----------------------------------------------------------------------'
-def logwatch_group_precompile( hostname, item, _unused):
- return logwatch_precompile( hostname, item, None ), host_extra_conf( hostname, logwatch_groups )
+def logwatch_group_precompile(hostname, item, _unused):
+ return logwatch_precompile(hostname, item, None), host_extra_conf(hostname, logwatch_groups)
def logwatch_groups_of_logfile(filename, params=False):
import fnmatch
@@ -283,7 +283,7 @@ def inventory_logwatch_groups(info):
for group in groups:
if group not in added_groups:
added_groups.append(group)
- inventory.append((group, [] ))
+ inventory.append((group, None))
return inventory
def check_logwatch_groups(item, params, info):
@@ -359,9 +359,7 @@ def check_logwatch_generic(item, params, loglines, found, groups=False):
if os.path.exists(logfile) and os.path.getsize(logfile) > logwatch_max_filesize:
return (2, "unacknowledged messages have exceeded max size (%d Bytes)" % logwatch_max_filesize)
- #
# Write out new log lines (no reclassify here. It is done later in general for all logs)
- #
if len(loglines) > 0:
worst = -1
for line in loglines:
@@ -380,8 +378,8 @@ def check_logwatch_generic(item, params, loglines, found, groups=False):
# Get the patterns (either compile or reuse the precompiled ones)
# Check_MK creates an empty string if the precompile function has
- # not been exectued yet. The precomile function creates an empty
- # list when no rules/patterns are defined. In case of the logwatch.groups
+ # not been executed yet. The precompile function creates an empty
+ # list when no ruless/patterns are defined. In case of the logwatch.groups
# checks, params are a tuple with the normal logwatch parameters on the first
# and the grouping patterns on the second position
if params not in ('', None):
@@ -399,10 +397,8 @@ def check_logwatch_generic(item, params, loglines, found, groups=False):
level = newlevel
return level
- #
# Read current log messages, reclassify all messages and write out the
# whole file again if at least one line has been reclassified
- #
worst = 0
last_worst_line = ''
reclassified_lines = []
diff --git a/web/htdocs/wato.py b/web/htdocs/wato.py
index 56681df..21f0a7b 100644
--- a/web/htdocs/wato.py
+++ b/web/htdocs/wato.py
@@ -2705,9 +2705,9 @@ def mode_object_parameters(phase):
forms.end()
-PARAMETERS_UNKNOW = []
+PARAMETERS_UNKNOWN = []
PARAMETERS_OMIT = []
-def output_analysed_ruleset(all_rulesets, rulespec, hostname, service, known_settings=PARAMETERS_UNKNOW):
+def output_analysed_ruleset(all_rulesets, rulespec, hostname, service, known_settings=PARAMETERS_UNKNOWN):
def rule_url(rule):
rule_folder, rule_nr = rule
return make_link([
@@ -2741,11 +2741,18 @@ def output_analysed_ruleset(all_rulesets, rulespec, hostname, service, known_set
# Show the resulting value or factory setting
html.write("<td class='settingvalue %s'>" % (len(rules) > 0 and "used" or "unused"))
- # In some cases we now the settings from a check_mk auomation
+ # In some cases we now the settings from a check_mk automation
if known_settings is PARAMETERS_OMIT:
return
- elif known_settings is not PARAMETERS_UNKNOW:
+ # Special handling for logwatch: The check parameter is always None. The actual
+ # patterns are configured in logwatch_rules. We do not have access to the actual
+ # patterns here but just to the useless "None". In order not to complicate things
+ # we simply display nothing here.
+ elif varname == "logwatch_rules":
+ pass
+
+ elif known_settings is not PARAMETERS_UNKNOWN:
html.write(valuespec.value_to_text(known_settings))
else:
Module: check_mk
Branch: master
Commit: 57bf329bc110f948c906712babfa39dfd0df4e85
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=57bf329bc110f9…
Author: Mathias Kettner <mk(a)mathias-kettner.de>
Date: Thu Sep 4 10:31:38 2014 +0200
Revert "#1383 Offline disks and required mirror free space are accounted in oracle_asm_diskgroup"
This reverts commit 82ea21342f0857672b787f1afb04a98899cad6b5.
---
.werks/1383 | 14 ----------
ChangeLog | 2 --
checkman/oracle_asm_diskgroup | 50 ++++++++-------------------------
checks/oracle_asm_diskgroup | 51 ++++------------------------------
web/plugins/wato/check_parameters.py | 22 ---------------
5 files changed, 17 insertions(+), 122 deletions(-)
diff --git a/.werks/1383 b/.werks/1383
deleted file mode 100644
index 2b676f2..0000000
--- a/.werks/1383
+++ /dev/null
@@ -1,14 +0,0 @@
-Title: Offline disks and rewuired mirror free space are accounted in oracle_asm_diskgroup
-Level: 2
-Component: checks
-Compatible: incomp
-Version: 1.2.5i6
-Date: 1409772890
-Class: feature
-
-New rule in WATO is requirred to account this change. The old
-<i>filesystem_default_levels</i> is replaced by <i>asm_diskgroup_default_levels</i>. WATO use
-the new rulename but old values must be migrated to
-<i>asm_diskgroup_default_levels</i>.
-
-
diff --git a/ChangeLog b/ChangeLog
index e69f11e..03a5fe7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -43,8 +43,6 @@
* 1176 winperf_msx_queues: The list of counters for inventory can now be configured host based using wato
* 0656 brocade_fcport: inventory rule can now choose upon physical und operations states as well, state choices were also updated
* 1177 Hivemanger: New agent to check hivemanager devices
- * 1383 Offline disks and rewuired mirror free space are accounted in oracle_asm_diskgroup
- NOTE: Please refer to the migration notes!
* 1051 FIX: tcp_conn_stats: fix missing performance data...
* 1142 FIX: winperf_ts_sessions: fix computation, check has never really worked
* 1090 FIX: zfsget: fixed exception which happened on incomplete zfs entries
diff --git a/checkman/oracle_asm_diskgroup b/checkman/oracle_asm_diskgroup
index 51e0883..fc9bdce 100644
--- a/checkman/oracle_asm_diskgroup
+++ b/checkman/oracle_asm_diskgroup
@@ -4,44 +4,22 @@ catalog: app/oracle
license: GPL
distribution: check_mk
description:
- This check measures the usage of ASM disk groups. The usage
- is checked against a warning and a critical level, which
- can be specified in numerous ways.
+ Checks usage of ORACLE ASM Disk Groups.
- {Requirements:} The {mk_oracle} is needed on the monitored machines.
- See details for {mk_oracle} at manpage of {oracle_instance}.
+ Make sure you have installed the mk_oracle_asm agent plugin on the monitored
+ machines. (It uses asmcmd commandline tool there to gather the needed
+ informations.)
- {Trends:} This checks supports diskgroup {trends}. This means that the
- check is able to compute the {change} of the used space over the time
- and can make a forecast into the future. It can estimate the time when
- the diskgroup will be full.
-
- In a default configuration the check will compute the trend based on the
- data of the last 24 hours using a logarithmic average that gives more recent
- data a higher weight. Also data beyond the 24 hours will to some small degree be
- reflected in the computation. The advantage of this algorithm is a more
- precise prediction and a simpler implementation, which does not need any
- access to any RRDs or similar storage.
-
- Please note that when a diskgroup is started to be monitored,
- the trend of the past is unknown and is assumed to be {zero}.
- It will take at least one trend range of time until the trend
- approximately reflects the reality.
+ See also check manpage for df. All the configuration from there can
+ also be used for ORACLE ASM Disk Groups.
item:
- The name of the disk group as delivered by the agent without the {/} at the
- end.
-
- See also check manpage for {df}. All configurations are usable for ORACLE ASM
- disk groups.
-
- The old parameter {filesystem_default_levels} is replaced with
- {asm_diskgroup_default_levels}.
+ The name of the disk group as delivered by asmcmd.
examples:
# Set default levels for all filesystems (including ORACLE ASM Disk Groups)
# to 90/95
- asm_diskgroup_default_levels = (90, 95)
+ filesystem_default_levels = (90, 95)
# more examples in check manpage for df
@@ -49,17 +27,11 @@ perfdata:
see check manpage for df
inventory:
- Creates one check for every diskgroup in the agent's output.
+ Creates one check for every disk group in the agent's output.
[parameters]
-parameters(dict): see check manpage for df. The following parameters
- are added for more functionality.
-
- {"req_mir_free"}: If this is set to {True}, the free space is checked
- against the req_mir_free instead of free_mb. This works only for
- NORMAL or HIGH redundancy disk groups. {required mirror free space used}
- is added to the output, when {req_mir_free} is {True}
+parameters(dict): see check manpage for df
[configuration]
-asm_diskgroup_default_levels: see check manpage for df and comments above
+filesystem_default_levels: see check manpage for df
diff --git a/checks/oracle_asm_diskgroup b/checks/oracle_asm_diskgroup
index e380e23..f8ec66f 100644
--- a/checks/oracle_asm_diskgroup
+++ b/checks/oracle_asm_diskgroup
@@ -30,15 +30,6 @@
# MOUNTED NORMAL N 512 4096 1048576 3072 2146 309 918 0 Y OCR_VOTE/
# The agent section <<<oracle_asm_diskgroup>>> does not output the header line
-factory_settings["asm_diskgroup_default_levels"] = {
- "levels" : (80.0, 90.0), # warn/crit in percent
- "magic_normsize" : 20, # Standard size if 20 GB
- "levels_low" : (50.0, 60.0), # Never move warn level below 50% due to magic factor
- "trend_range" : 24,
- "trend_perfdata" : True, # do send performance data for trends
- "req_mir_free" : False, # Ignore Requirre mirror free space in DG
-}
-
def inventory_oracle_asm_diskgroup(info):
return [ (line[-1].rstrip("/"), {}) for line in info ]
@@ -55,40 +46,10 @@ def check_oracle_asm_diskgroup(item, params, info):
name = line
else:
name = ""
-
- dg_name = name.rstrip('/')
-
- if dg_name == item:
- if typ in ('NORMAL', 'HIGH') and params.get('req_mir_free'):
-
- req_mir_free_mb = saveint(req_mir_free_mb)
-
- if req_mir_free_mb < 0:
- # requirred mirror free space could be negative!
- req_mir_free_mb = 0
-
- status, infotext, perfdata = df_check_filesystem(g_hostname, item, int(total_mb),
- int(req_mir_free_mb), params)
- infotext += ', %s redundancy, requirred mirror free space used' % typ.lower()
-
- else:
- status, infotext, perfdata = df_check_filesystem(g_hostname, item, int(total_mb),
- int(free_mb), params)
- infotext += ', %s redundancy' % typ.lower()
-
- offline_disks = saveint(offline_disks)
- if offline_disks > 0:
- status = max(2, status)
- infotext += ', %d Offline disks found(!!)' % offline_disks
-
- return (status, infotext, perfdata)
-
- # In case of missing information we assume that the ASM-Instance is
- # checked at a later time.
- # This reduce false notifications for not running ASM-Instances
- raise MKCounterWrapped(None, "Diskgroup %s not found" % item)
-
->>>>>>> 7774f2a... mk_oracle_asm Offline disks and requirred mirror free space are accounted
+ if name.rstrip('/') == item:
+ return df_check_filesystem(g_hostname, item, int(total_mb),
+ int(free_mb), params)
+ return (3, "Disk group not found")
check_info["oracle_asm_diskgroup"] = {
@@ -96,7 +57,7 @@ check_info["oracle_asm_diskgroup"] = {
'inventory_function' : inventory_oracle_asm_diskgroup,
'service_description' : 'ASM Diskgroup %s',
'has_perfdata' : True,
- 'group' : 'asm_diskgroup',
- 'default_levels_variable' : 'asm_diskgroup_default_levels',
+ 'group' : 'filesystem',
+ 'default_levels_variable' : 'filesystem_default_levels',
"includes" : [ "df.include" ],
}
diff --git a/web/plugins/wato/check_parameters.py b/web/plugins/wato/check_parameters.py
index 8b2d523..b5712e8 100644
--- a/web/plugins/wato/check_parameters.py
+++ b/web/plugins/wato/check_parameters.py
@@ -3270,28 +3270,6 @@ register_check_parameters(
register_check_parameters(
subgroup_applications,
- "asm_diskgroup",
- _("ASM Diskgroup (used space and growth)"),
- Dictionary(
- elements = filesystem_elements + [
- ("req_mir_free", FixedValue(False, title = _("Use required mirror free space as free space"),
- totext = "",
- help = _("ASM calculates the free space depending on free_mb or require mirror "
- "free space. Enable this option to set the check against require "
- "mirror free space. This only works for normal or high redundancy. "))
- ),
- ],
- hidden_keys = ["flex_levels"],
- ),
- TextAscii(
- title = _("ASM-Diskgroups"),
- help = _("Specify the name of the ASM-Diskgroup "),
- allow_empty = False),
- "dict"
-)
-
-register_check_parameters(
- subgroup_applications,
"mssql_backup",
_("MSSQL Time since last Backup"),
Optional(
Module: check_mk
Branch: master
Commit: 4f74529aca5d5b3decee810e59ec9c2d6b9227b1
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=4f74529aca5d5b…
Author: Bastian Kuhn <bk(a)mathias-kettner.de>
Date: Wed Sep 3 18:10:43 2014 +0200
Hivemanager: a few improvements
---
agents/special/agent_hivemanager | 58 ++++++++++++++++++++++++
checkman/hivemanager_devices | 20 ++++++++
checks/{agent_hivmanager => agent_hivemanager} | 2 +-
3 files changed, 79 insertions(+), 1 deletion(-)
diff --git a/agents/special/agent_hivemanager b/agents/special/agent_hivemanager
new file mode 100755
index 0000000..b4145cd
--- /dev/null
+++ b/agents/special/agent_hivemanager
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk(a)mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+import sys
+from urllib2 import urlopen
+import urllib2, base64, json
+
+try:
+ user = sys.argv[1]
+ password = sys.argv[2]
+except:
+ print "Usage: agent_hivemanager <USERNAME> <PASSWORD>"
+ sys.exit(2)
+
+def get_url(address):
+ request = urllib2.Request(address)
+ base64string = base64.encodestring('%s:%s' % (user, password)).replace('\n', '')
+ request.add_header("Authorization", "Basic %s" % base64string)
+ request.add_header("Content-Type", "application/json")
+ try:
+ return urllib2.urlopen(request)
+ except urllib2.HTTPError, e:
+ print e
+ sys.exit(2)
+
+print "<<<check_mk>>>"
+print "Version: 1.2.5"
+print "AgentOs: Hivemanager"
+
+print "<<<hivemanager_devices:sep(124)>>>"
+address = "https://login.hivemanager.ch/hm/api/v1/devices"
+for line in json.loads(get_url(address).read()):
+ if line['upTime'] == '':
+ line['upTime'] = "down"
+ print "|".join(map(str, [ line['hostName'], line['clients'], line['alarm'], line['connection'], line['upTime'] ]))
diff --git a/checkman/hivemanager_devices b/checkman/hivemanager_devices
new file mode 100644
index 0000000..8affa8f
--- /dev/null
+++ b/checkman/hivemanager_devices
@@ -0,0 +1,20 @@
+title: Hivemanager Devices
+agents: special
+catalog: hw/network/hivemanager
+license: GPL
+distribution: check_mk
+description:
+ This check connects to a Hivemanager Account and get a list of all connected devices.
+ Then the connection state and the alarm state will be checked. Also it's possible to set levels for
+ the maximum number of connected clients and for the maximum Uptime of a client.
+ To connect to the Hivemanager a special Datasource
+ Program is used. You can find it under the name <i>Hivemanager</i> in Wato -> Datasource Programs.
+
+item:
+ The name of the Devices
+
+perfdata:
+ One variable: the current number of clients
+
+inventory:
+ One check for each device will be created
diff --git a/checks/agent_hivmanager b/checks/agent_hivemanager
similarity index 94%
rename from checks/agent_hivmanager
rename to checks/agent_hivemanager
index 801a072..cbdd5f0 100644
--- a/checks/agent_hivmanager
+++ b/checks/agent_hivemanager
@@ -26,6 +26,6 @@
def agent_hivemanager_arguments(params, hostname, ipaddress):
# User, Password
- return "%s %s" % ( params[0], params[1] )
+ return "%s %s" % ( quote_shell_string(params[0]), quote_shell_string(params[1]) )
special_agent_info['hivemanager'] = agent_hivemanager_arguments
Module: check_mk
Branch: master
Commit: 6958dd6fdce8ccfc8482dd27b0d2d8da8ee80715
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=6958dd6fdce8cc…
Author: Mathias Kettner <mk(a)mathias-kettner.de>
Date: Wed Sep 3 15:50:09 2014 +0200
#1394 HTML notifications have a new content field for debugging variables
When configuring notifications and customizing templates then you might often
wonder, which variables are present and which values they have. In the HTML
email notifications you can now enable the new field <i>Complete variable list</i>
in the <i>Information to be displayed in the email body<i/>. This adds a table
with all avaiable variables and their current settings in the notification
in question.
---
.werks/1394 | 14 +++++++++++++
ChangeLog | 3 ++-
notifications/mail | 39 +++++++++++++++++++++++++++++++++++++
web/plugins/wato/notifications.py | 17 ++++++++--------
4 files changed, 64 insertions(+), 9 deletions(-)
diff --git a/.werks/1394 b/.werks/1394
new file mode 100644
index 0000000..0080009
--- /dev/null
+++ b/.werks/1394
@@ -0,0 +1,14 @@
+Title: HTML notifications have a new content field for debugging variables
+Level: 2
+Component: notifications
+Compatible: compat
+Version: 1.2.5i6
+Date: 1409752076
+Class: feature
+
+When configuring notifications and customizing templates then you might often
+wonder, which variables are present and which values they have. In the HTML
+email notifications you can now enable the new field <i>Complete variable list</i>
+in the <i>Information to be displayed in the email body<i/>. This adds a table
+with all avaiable variables and their current settings in the notification
+in question.
diff --git a/ChangeLog b/ChangeLog
index 3ac4fee..2debae7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -69,7 +69,7 @@
NOTE: Please refer to the migration notes!
* 1102 FIX: esx_vsphere_counters: no longer raise false alarms because of invalid data from ESX Host...
* 1149 FIX: check_mk-ibm_svc_systemstats.diskio, check_mk-ibm_svc_systemstats.iops: fix exception in Perf-O-Meter
- * 0651 FIX: f5_bigip_interfaces: indentation error and oid to determine the interface state fixed...
+ * 0651 FIX: f5_bigip_interfaces: Fix invalid throughput values, detect newer F5 devices...
* 1393 FIX: casa_cpu_temp, casa_cpu_util: Change service description to standard...
NOTE: Please refer to the migration notes!
@@ -95,6 +95,7 @@
Notifications:
* 1151 Add variables (HOST/SERVICE)ACK(AUTHOR/COMMENT) to notification context...
+ * 1394 HTML notifications have a new content field for debugging variables...
* 1156 FIX: Graphs in HTML mails are now sent again where they where missing...
* 1157 FIX: Fixed SMS plugin on at least debian (distrs which have no sendsms/smssend)...
diff --git a/notifications/mail b/notifications/mail
index 06c0408..57f4b28 100755
--- a/notifications/mail
+++ b/notifications/mail
@@ -170,6 +170,17 @@ td.graphs {
img {
margin-right: 10px;
}
+
+table.context {
+ border-collapse: collapse;
+}
+
+table.context td {
+ border: 1px solid #888;
+ padding: 3px 8px;
+}
+
+
</style>
</head>
<body>
@@ -261,6 +272,12 @@ body_elements = [
( "perfdata", "service", False, "Performance Data",
"$SERVICEPERFDATA$",
"$SERVICEPERFDATA$",),
+
+ # Debugging
+ ( "context", "both", False, "Complete variable list",
+ "$CONTEXT_ASCII$",
+ "$CONTEXT_HTML$",
+ )
]
tmpl_host_subject = 'Check_MK: $HOSTNAME$ - $EVENT_TXT$'
@@ -278,10 +295,32 @@ def substitute_context(template, context):
for varname, value in context.items():
template = template.replace('$'+varname+'$', value)
+ # Debugging of variables. Create content only on demand
+ if "$CONTEXT_ASCII$" in template or "$CONTEXT_HTML$" in template:
+ template = replace_variable_context(template, context)
+
# Remove the rest of the variables and make them empty
template = re.sub("\$[A-Z_][A-Z_0-9]*\$", "", template)
return template
+
+def replace_variable_context(template, context):
+ ascii_output = ""
+ html_output = "<table class=context>\n"
+ elements = context.items()
+ elements.sort()
+ for varname, value in elements:
+ ascii_output += "%s=%s\n" % (varname, value)
+ html_output += "<tr><td class=varname>%s</td><td class=value>%s</td></tr>\n" % (
+ varname, encode_entities(value))
+ html_output += "</table>\n"
+ return template.replace("$CONTEXT_ASCII$", ascii_output).replace("$CONTEXT_HTML$", html_output)
+
+
+def encode_entities(text):
+ return text.replace("&", "&").replace("<", "<").replace(">", ">")
+
+
def multipart_mail(target, subject, from_address, reply_to, content_txt, content_html, attach = []):
m = MIMEMultipart('related', _charset='utf-8')
diff --git a/web/plugins/wato/notifications.py b/web/plugins/wato/notifications.py
index b120dd1..33d8830 100644
--- a/web/plugins/wato/notifications.py
+++ b/web/plugins/wato/notifications.py
@@ -63,14 +63,15 @@ register_notification_parameters("mail",
ListChoice(
title = _("Information to be displayed in the email body"),
choices = [
- ( "address", _("IP Address of Host") ),
- ( "abstime", _("Absolute Time of Alert") ),
- ( "reltime", _("Relative Time of Alert") ),
- ( "longoutput", _("Additional Plugin Output") ),
- ( "ack_author", _("Acknowledgement Author") ),
- ( "ack_comment", _("Acknowledgement Comment") ),
- ( "perfdata", _("Performance Data") ),
- ( "graph", _("Performance Graphs") ),
+ ( "address", _("IP Address of Host") ),
+ ( "abstime", _("Absolute Time of Alert") ),
+ ( "reltime", _("Relative Time of Alert") ),
+ ( "longoutput", _("Additional Plugin Output") ),
+ ( "ack_author", _("Acknowledgement Author") ),
+ ( "ack_comment", _("Acknowledgement Comment") ),
+ ( "perfdata", _("Performance Data") ),
+ ( "graph", _("Performance Graphs") ),
+ ( "context", _("Complete variable list (for testing)" ) ),
],
default_value = [ "perfdata", "graph", "abstime", "address", "longoutput" ],
)