Module: check_mk
Branch: master
Commit: c070cb27586ea9e644e5ef3c5576019e69ccea45
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=c070cb27586ea9…
Author: Sven Panne <sp(a)mathias-kettner.de>
Date: Thu Jun 14 09:31:08 2018 +0200
Consistently use dictionaries for contexts when loading checks/plugins.
The previous list/dict mix was simply confusing. In addition, it was even
less efficient.
Change-Id: If1130f31409f72c9186b083c8aeede3e87a23abf
---
cmk_base/check_api.py | 8 ++++++--
cmk_base/checks.py | 11 +++--------
2 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/cmk_base/check_api.py b/cmk_base/check_api.py
index 6470c4b..c3f0caf 100644
--- a/cmk_base/check_api.py
+++ b/cmk_base/check_api.py
@@ -104,7 +104,7 @@ import cmk_base.prediction as _prediction
def _get_check_context():
"""This is called from cmk_base code to get the Check API things. Don't
use this from checks."""
- return [ (k, v) for k, v in globals().items() if k[0] != "_" ]
+ return {k: v for k, v in globals().iteritems() if not k.startswith("_")}
#.
# .--Check API-----------------------------------------------------------.
@@ -499,4 +499,8 @@ def _agent_cache_file_age(hostname, check_plugin_name):
return None
-__all__ = dict(_get_check_context()).keys()
+# NOTE: Currently this is not really needed, it is just here to keep any start
+# import in sync with our intended API.
+# TODO: Do we really need this? Is there code which uses a star import for this
+# module?
+__all__ = _get_check_context().keys()
diff --git a/cmk_base/checks.py b/cmk_base/checks.py
index 2ab106d..ebdcd93 100644
--- a/cmk_base/checks.py
+++ b/cmk_base/checks.py
@@ -249,14 +249,9 @@ def new_check_context():
"active_check_info" : active_check_info,
"special_agent_info" : special_agent_info,
}
-
- # Add the Check API
- #
- # For better separation it would be better to copy the check API objects, but
- # this might consume too much memory. So we simply reference it.
- for k, v in check_api._get_check_context():
- context[k] = v
-
+ # NOTE: For better separation it would be better to copy the values, but
+ # this might consume too much memory, so we simply reference them.
+ context.update(check_api._get_check_context())
return context
Module: check_mk
Branch: master
Commit: b4751f09850c77cc128ab9a09b41fb460d3363ab
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=b4751f09850c77…
Author: Sven Panne <sp(a)mathias-kettner.de>
Date: Thu Jun 14 09:14:41 2018 +0200
Simplified inventory context calculation.
Change-Id: Ib34671688e9e49b7662e21665ff13d950d380ad9
---
cmk_base/inventory.py | 8 ++++----
cmk_base/inventory_plugins.py | 19 ++++++-------------
2 files changed, 10 insertions(+), 17 deletions(-)
diff --git a/cmk_base/inventory.py b/cmk_base/inventory.py
index e394a50..b289df3 100644
--- a/cmk_base/inventory.py
+++ b/cmk_base/inventory.py
@@ -385,7 +385,7 @@ def _run_inventory_export_hooks(hostname, inventory_tree):
# '----------------------------------------------------------------------'
def get_inventory_context():
- return [
- ("inv_tree_list", inv_tree_list),
- ("inv_tree", inv_tree),
- ]
+ return {
+ "inv_tree_list": inv_tree_list,
+ "inv_tree": inv_tree,
+ }
diff --git a/cmk_base/inventory_plugins.py b/cmk_base/inventory_plugins.py
index c9465dd..2b87058 100644
--- a/cmk_base/inventory_plugins.py
+++ b/cmk_base/inventory_plugins.py
@@ -86,14 +86,12 @@ def _new_inv_context(get_inventory_context):
"inv_info" : inv_info,
"inv_export" : inv_export,
}
-
- # Add the inventory plugin and check API
- #
- # For better separation it would be better to copy the check API objects, but
- # this might consume too much memory. So we simply reference it.
- for k, v in check_api._get_check_context() + get_inventory_context():
- context[k] = v
-
+ # NOTE: For better separation it would be better to copy the values, but
+ # this might consume too much memory, so we simply reference them.
+ # NOTE: It is possible that check includes are included, so we need the
+ # usual check context here, too.
+ context.update(checks.new_check_context())
+ context.update(get_inventory_context())
return context
@@ -112,11 +110,6 @@ def _load_plugin_includes(check_file_path, plugin_context):
if not os.path.exists(include_file_path):
include_file_path = checks.check_include_file_path(include_file_name)
- # In case a check include file is used the plugin context needs to be
- # prepared with a check plugin context
- for key, val in checks.new_check_context().items():
- plugin_context.setdefault(key, val)
-
try:
execfile(include_file_path, plugin_context)
except Exception, e:
Module: check_mk
Branch: master
Commit: 36c79860a5cd1a730ad513c8003cebbe1cb0243e
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=36c79860a5cd1a…
Author: Jukka Aro <ja(a)mathias-kettner.de>
Date: Tue Jun 12 10:36:33 2018 +0200
6190 FIX Win-agent: prevent unsigned integer overflow in process uptime
The process uptimes for Windows are calculated by subtracting the process
creation time from the current system time. Under certain circumstances,
adjusting the system clock, e.g. for daylight saving time, has led to
some processes reporting a creation time with a false offset, causing an
unsigned integer overflow through a negative subtraction result. This has
in turn led to the crash of the ps check.
The unsigned integer overflow is now prevented by checking the result of the
subtraction and, in case of a negative value, logging it as an error and
setting the process uptime to the default value 1.
Change-Id: I4cbefc5e500880594be0f385d86cc3f1959ce683
---
.werks/6190 | 20 ++++++++++++++++++++
agents/windows/build_version | 2 +-
agents/windows/sections/SectionPS.cc | 26 ++++++++++++++++++++++----
3 files changed, 43 insertions(+), 5 deletions(-)
diff --git a/.werks/6190 b/.werks/6190
new file mode 100644
index 0000000..369b213
--- /dev/null
+++ b/.werks/6190
@@ -0,0 +1,20 @@
+Title: Win-agent: prevent unsigned integer overflow in process uptime
+Level: 1
+Component: checks
+Compatible: compat
+Edition: cre
+Version: 1.6.0i1
+Date: 1528807253
+Class: fix
+
+The process uptimes for Windows are calculated by subtracting the process
+creation time from the current system time. Under certain circumstances,
+setting up the system clock e. g. with daylight saving time has led to a
+situation where some processes have reported a creation time with false
+offset and an unsigned integer overflow through negative subtraction result.
+This has further led to the crash of the ps check.
+
+The unsigned integer overflow is now prevented by checking the result of the
+subtraction and, in case of a negative value, logging it as an error and
+setting the process uptime to the default value 1.
+
diff --git a/agents/windows/build_version b/agents/windows/build_version
index b912429..49f2b70 100644
--- a/agents/windows/build_version
+++ b/agents/windows/build_version
@@ -1 +1 @@
-3254
+3256
diff --git a/agents/windows/sections/SectionPS.cc b/agents/windows/sections/SectionPS.cc
index 2b07008..e8dfa7b 100644
--- a/agents/windows/sections/SectionPS.cc
+++ b/agents/windows/sections/SectionPS.cc
@@ -218,8 +218,17 @@ bool SectionPS::outputWMI(std::ostream &out) {
std::tm t;
ss >> std::get_time(&t, L"%Y%m%d%H%M%S");
time_t creation_time = mktime(&t);
- auto uptime = static_cast<ULONGLONG>(
- section_helpers::current_time() - creation_time);
+ // Cope with possible problems with process creation time. Ensure
+ // that the result of subtraction is not negative.
+ long long currTime = section_helpers::current_time();
+ long long timeDiff = currTime - creation_time;
+
+ if (timeDiff < 0) {
+ Error(_logger) << "Creation time " << creation_time
+ << " lies ahead of current time " << currTime;
+ }
+
+ auto uptime = static_cast<ULONGLONG>(std::max(timeDiff, 1LL));
outputProcess(
out, std::stoull(result.get<std::string>(L"VirtualSize")),
@@ -329,8 +338,17 @@ bool SectionPS::outputNative(std::ostream &out) {
}
// Uptime
- auto uptime = static_cast<unsigned long long>(
- section_helpers::current_time() - sinceEpoch(createTime));
+ // Cope with possible problems with process creation time. Ensure
+ // that the result of subtraction is not negative.
+ long long currTime = section_helpers::current_time();
+ long long timeDiff = currTime - sinceEpoch(createTime);
+
+ if (timeDiff < 0) {
+ Error(_logger) << "Creation time " << sinceEpoch(createTime)
+ << " lies ahead of current time " << currTime;
+ }
+
+ auto uptime = static_cast<unsigned long long>(std::max(timeDiff, 1LL));
// Note: CPU utilization is determined out of usermodetime and
// kernelmodetime
Module: check_mk
Branch: master
Commit: 072aaaea9696c33ba638dedd4c7279b43384d436
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=072aaaea9696c3…
Author: Sven Panne <sp(a)mathias-kettner.de>
Date: Tue Jun 12 10:39:35 2018 +0200
Removed a piggyback-related import cycle.
Although this removes a cycle, pylint now shows 7 cycles: For some obscure
reason it finds 2 more cycles now, but those are not related to this commit.
We really need to investigate what's going on here, but to do this we
should really, really disentangle pytest and pylint, the current state is
totally arcane and not comprehensible at all... :-/
Back to the commit itself: To break the cycle, we thread through a
parameter, although this is a bit verbose and breaks the piggyback
"abstraction" a bit. Nevertheless, breaking dependency cycles is *far* more
important than this tiny ugliness, and what we really want here is a class:
This would fix both problems at once, and probably many more...
Change-Id: I890f8d5666f5dd4551a2c52ae00fa7d05e8eb807
---
cmk_base/check_table.py | 2 +-
cmk_base/config.py | 2 +-
cmk_base/data_sources/piggyback.py | 16 ++++++++++------
cmk_base/modes/check_mk.py | 4 ++--
cmk_base/piggyback.py | 37 ++++++++++++++++++-------------------
5 files changed, 32 insertions(+), 29 deletions(-)
diff --git a/cmk_base/check_table.py b/cmk_base/check_table.py
index cca9a11..9a737b5 100644
--- a/cmk_base/check_table.py
+++ b/cmk_base/check_table.py
@@ -84,7 +84,7 @@ def get_check_table(hostname, remove_duplicates=False, use_cache=True,
if not config.is_snmp_host(hostname) and cmk_base.check_utils.is_snmp_check(checkname) and \
(not config.has_management_board(hostname) or config.management_protocol_of(hostname) != "snmp"):
passed = False
- if not config.is_tcp_host(hostname) and not piggyback.has_piggyback_raw_data(hostname) \
+ if not config.is_tcp_host(hostname) and not piggyback.has_piggyback_raw_data(config.piggyback_max_cachefile_age, hostname) \
and cmk_base.check_utils.is_tcp_check(checkname):
passed = False
is_checkname_valid_cache[the_id] = passed
diff --git a/cmk_base/config.py b/cmk_base/config.py
index 6984ad2..08576a4 100644
--- a/cmk_base/config.py
+++ b/cmk_base/config.py
@@ -849,7 +849,7 @@ def is_ping_host(hostname):
import cmk_base.piggyback as piggyback
return not is_snmp_host(hostname) \
and not is_tcp_host(hostname) \
- and not piggyback.has_piggyback_raw_data(hostname) \
+ and not piggyback.has_piggyback_raw_data(piggyback_max_cachefile_age, hostname) \
and not has_management_board(hostname)
diff --git a/cmk_base/data_sources/piggyback.py b/cmk_base/data_sources/piggyback.py
index 154bf2f..424f63c 100644
--- a/cmk_base/data_sources/piggyback.py
+++ b/cmk_base/data_sources/piggyback.py
@@ -26,26 +26,30 @@
import os
-import cmk.paths
+from cmk.paths import tmp_dir
-import cmk_base.piggyback as piggyback
+from cmk_base.config import piggyback_max_cachefile_age
+from cmk_base.piggyback import get_piggyback_raw_data
from .abstract import CheckMKAgentDataSource
+
+def _raw_data(name):
+ return get_piggyback_raw_data(piggyback_max_cachefile_age, name)
+
+
class PiggyBackDataSource(CheckMKAgentDataSource):
def id(self):
return "piggyback"
def describe(self):
- path = os.path.join(cmk.paths.tmp_dir, "piggyback", self._hostname)
+ path = os.path.join(tmp_dir, "piggyback", self._hostname)
return "Process piggyback data from %s" % path
def _execute(self):
- return piggyback.get_piggyback_raw_data(self._hostname) \
- + piggyback.get_piggyback_raw_data(self._ipaddress)
-
+ return _raw_data(self._hostname) + _raw_data(self._ipaddress)
def _get_raw_data(self):
"""Returns the current raw data of this data source
diff --git a/cmk_base/modes/check_mk.py b/cmk_base/modes/check_mk.py
index 243d8e5..8d9ab62 100644
--- a/cmk_base/modes/check_mk.py
+++ b/cmk_base/modes/check_mk.py
@@ -709,8 +709,8 @@ modes.register(Mode(
# '----------------------------------------------------------------------'
def mode_cleanup_piggyback():
- import cmk_base.piggyback
- cmk_base.piggyback.cleanup_piggyback_files()
+ from cmk_base.piggyback import cleanup_piggyback_files
+ cleanup_piggyback_files(config.piggyback_max_cachefile_age)
modes.register(Mode(
long_option="cleanup-piggyback",
diff --git a/cmk_base/piggyback.py b/cmk_base/piggyback.py
index ecf6eef..faac4df 100644
--- a/cmk_base/piggyback.py
+++ b/cmk_base/piggyback.py
@@ -33,26 +33,25 @@ from cmk.exceptions import MKGeneralException
import cmk_base.utils
import cmk_base.console as console
-import cmk_base.config as config
-def get_piggyback_raw_data(hostname):
+def get_piggyback_raw_data(piggyback_max_cachefile_age, hostname):
output = ""
if not hostname:
return output
- for sourcehost, file_path in _get_piggyback_files(hostname):
+ for sourcehost, file_path in _get_piggyback_files(piggyback_max_cachefile_age, hostname):
console.verbose("Using piggyback raw data from host %s.\n" % sourcehost)
output += file(file_path).read()
return output
-def has_piggyback_raw_data(hostname):
- return _get_piggyback_files(hostname) != []
+def has_piggyback_raw_data(piggyback_max_cachefile_age, hostname):
+ return _get_piggyback_files(piggyback_max_cachefile_age, hostname) != []
-def _get_piggyback_files(hostname):
+def _get_piggyback_files(piggyback_max_cachefile_age, hostname):
"""Gather a list of piggyback files to read for further processing.
Please note that there may be multiple parallel calls executing the
@@ -87,9 +86,9 @@ def _get_piggyback_files(hostname):
continue # File might've been deleted. That's ok.
# Skip piggyback files that are outdated at all
- if file_age > config.piggyback_max_cachefile_age:
+ if file_age > piggyback_max_cachefile_age:
console.verbose("Piggyback file %s is outdated (%d seconds too old). Skip processing.\n" %
- (file_path, file_age - config.piggyback_max_cachefile_age))
+ (file_path, file_age - piggyback_max_cachefile_age))
continue
# Skip piggyback files that have not been updated in the last contact
@@ -151,13 +150,13 @@ def store_piggyback_raw_data(sourcehost, piggybacked_raw_data):
remove_source_status_file(sourcehost)
-def cleanup_piggyback_files():
+def cleanup_piggyback_files(piggyback_max_cachefile_age):
"""This is a housekeeping job to clean up different old files from the
piggyback directories.
# Cleanup piggyback data of hosts that are not sending piggyback data anymore
# a) hosts that have a file below piggyback_sources:
- # -> check age of the file and remove it once it reached config.piggyback_max_cachefile_age
+ # -> check age of the file and remove it once it reached piggyback_max_cachefile_age
# b) hosts that don't have a file below piggyback_sources (old version or removed by step "a)"):
# -> remove all piggyback_raw_data files created by this source
@@ -168,11 +167,11 @@ def cleanup_piggyback_files():
functions. Therefor all these functions needs to deal with suddenly vanishing or
updated files/directories.
"""
- _cleanup_old_source_status_files()
- _cleanup_old_piggybacked_files()
+ _cleanup_old_source_status_files(piggyback_max_cachefile_age)
+ _cleanup_old_piggybacked_files(piggyback_max_cachefile_age)
-def _cleanup_old_source_status_files():
+def _cleanup_old_source_status_files(piggyback_max_cachefile_age):
base_dir = os.path.join(cmk.paths.tmp_dir, "piggyback_sources")
for entry in os.listdir(base_dir):
if entry[0] == ".":
@@ -185,11 +184,11 @@ def _cleanup_old_source_status_files():
except MKGeneralException, e:
continue # File might've been deleted. That's ok.
- if file_age > config.piggyback_max_cachefile_age:
+ if file_age > piggyback_max_cachefile_age:
console.verbose("Removing outdated piggyback source status file %s\n" % file_path)
_remove_piggyback_file(file_path)
-def _cleanup_old_piggybacked_files():
+def _cleanup_old_piggybacked_files(piggyback_max_cachefile_age):
"""Remove piggyback data that is not needed anymore
The monitoring (_get_piggyback_files()) is already skipping these files,
@@ -214,7 +213,7 @@ def _cleanup_old_piggybacked_files():
file_path = os.path.join(backed_host_dir_path, source_host_name)
- delete_reason = _shall_cleanup_piggyback_file(file_path, source_host_name, keep_sources)
+ delete_reason = _shall_cleanup_piggyback_file(piggyback_max_cachefile_age, file_path, source_host_name, keep_sources)
if delete_reason:
console.verbose("Removing outdated piggyback file (%s) %s\n" % (delete_reason, file_path))
_remove_piggyback_file(file_path)
@@ -229,7 +228,7 @@ def _cleanup_old_piggybacked_files():
raise
-def _shall_cleanup_piggyback_file(file_path, source_host_name, keep_sources):
+def _shall_cleanup_piggyback_file(piggyback_max_cachefile_age, file_path, source_host_name, keep_sources):
if source_host_name not in keep_sources:
return "Source not sending piggyback data"
@@ -239,8 +238,8 @@ def _shall_cleanup_piggyback_file(file_path, source_host_name, keep_sources):
return None # File might've been deleted. That's ok.
# Skip piggyback files that are outdated at all
- if file_age > config.piggyback_max_cachefile_age:
- return "%d seconds too old" % (file_age - config.piggyback_max_cachefile_age)
+ if file_age > piggyback_max_cachefile_age:
+ return "%d seconds too old" % (file_age - piggyback_max_cachefile_age)
# Skip piggyback files that have not been updated in the last contact
# with the source host that is currently being handled.