Module: check_mk
Branch: master
Commit: 8aa7c93c4455a1dbb3c4cf586377cbd35e9109aa
URL:
http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=8aa7c93c4455a1dbb3c4cf586377cbd35e9109aa
Author: Sven Panne <sp(a)mathias-kettner.de>
Date: Wed May 16 13:26:20 2018 +0200
Decouple main/history modules a bit more. Make future class structure obvious.
Change-Id: Ife5d4439b3741cec2ed8d1b7fbbd3e9e38616e5e
---
cmk/ec/history.py | 78 ++++++++++++++++++++++++++++++++++++++-----------------
cmk/ec/main.py | 14 +++-------
2 files changed, 57 insertions(+), 35 deletions(-)
diff --git a/cmk/ec/history.py b/cmk/ec/history.py
index 6986b48..759ed31 100644
--- a/cmk/ec/history.py
+++ b/cmk/ec/history.py
@@ -32,6 +32,46 @@ import time
import cmk.ec.actions
import cmk.render
+
+# TODO: As one can see clearly below, we should really have a class hierarchy here...
+
+def configure_event_history(settings, config, mongodb):
+ if config['archive_mode'] == 'mongodb':
+ # Configure the auto deleting indexes in the DB
+ _update_mongodb_indexes(settings, mongodb)
+ _update_mongodb_history_lifetime(settings, config, mongodb)
+ else:
+ pass
+
+
+def flush_event_history(settings, config, logger, lock_history, mongodb):
+ if config['archive_mode'] == 'mongodb':
+ _flush_event_history_mongodb(mongodb)
+ else:
+ _expire_logfiles(settings, config, logger, lock_history, True)
+
+
+def log_event_history(settings, config, logger, lock_history, mongodb,
active_history_period, table_events, event, what, who="",
addinfo=""):
+ if config['archive_mode'] == 'mongodb':
+ _log_event_history_to_mongodb(settings, config, logger, event, what, who,
addinfo, mongodb)
+ else:
+ _log_event_history_to_file(settings, config, logger, lock_history,
active_history_period, table_events, event, what, who, addinfo)
+
+
+def get_event_history(settings, config, mongodb, table_events, table_history, logger,
query):
+ if config['archive_mode'] == 'mongodb':
+ return _get_event_history_from_mongodb(settings, table_events, query, mongodb)
+ else:
+ return _get_event_history_from_file(settings, table_history, query, logger)
+
+
+def history_housekeeping(settings, config, logger, lock_history):
+ if config['archive_mode'] == 'mongodb':
+ pass
+ else:
+ _expire_logfiles(settings, config, logger, lock_history, False)
+
+
#.
# .--MongoDB-------------------------------------------------------------.
# | __ __ ____ ____ |
@@ -92,7 +132,7 @@ def _get_mongodb_max_history_age(mongodb):
return result['dt_-1']['expireAfterSeconds']
-def update_mongodb_indexes(settings, mongodb):
+def _update_mongodb_indexes(settings, mongodb):
if not mongodb.connection:
_connect_mongodb(settings, mongodb)
result = mongodb.db.ec_archive.index_information()
@@ -101,7 +141,7 @@ def update_mongodb_indexes(settings, mongodb):
mongodb.db.ec_archive.ensure_index([('time', DESCENDING)])
-def update_mongodb_history_lifetime(settings, config, mongodb):
+def _update_mongodb_history_lifetime(settings, config, mongodb):
if not mongodb.connection:
_connect_mongodb(settings, mongodb)
@@ -139,7 +179,8 @@ def _mongodb_next_id(mongodb, name, first_id=0):
return ret['seq']
-def _log_event_history_to_mongodb(settings, event, what, who, addinfo, mongodb):
+def _log_event_history_to_mongodb(settings, config, logger, event, what, who, addinfo,
mongodb):
+ _log_event(config, logger, event, what, who, addinfo)
if not mongodb.connection:
_connect_mongodb(settings, mongodb)
# We converted _id to be an auto incrementing integer. This makes the unique
@@ -158,7 +199,12 @@ def _log_event_history_to_mongodb(settings, event, what, who,
addinfo, mongodb):
})
-def get_event_history_from_mongodb(settings, table_events, query, mongodb):
+def _log_event(config, logger, event, what, who, addinfo):
+ if config['debug_rules']:
+ logger.info("Event %d: %s/%s/%s - %s" % (event["id"], what,
who, addinfo, event["text"]))
+
+
+def _get_event_history_from_mongodb(settings, table_events, query, mongodb):
filters, limit = query.filters, query.limit
history_entries = []
@@ -240,16 +286,6 @@ def get_event_history_from_mongodb(settings, table_events, query,
mongodb):
# | Functions for logging the history of events |
# '----------------------------------------------------------------------'
-def log_event_history(settings, config, logger, lock_history, mongodb,
active_history_period, table_events, event, what, who="",
addinfo=""):
- if config["debug_rules"]:
- logger.info("Event %d: %s/%s/%s - %s" % (event["id"], what,
who, addinfo, event["text"]))
-
- if config['archive_mode'] == 'mongodb':
- _log_event_history_to_mongodb(settings, event, what, who, addinfo, mongodb)
- else:
- _log_event_history_to_file(settings, config, lock_history, active_history_period,
table_events, event, what, who, addinfo)
-
-
# Make a new entry in the event history. Each entry is tab-separated line
# with the following columns:
# 0: time of log entry
@@ -257,7 +293,8 @@ def log_event_history(settings, config, logger, lock_history, mongodb,
active_hi
# 2: user who initiated the action (for GUI actions)
# 3: additional information about the action
# 4-oo: StatusTableEvents.columns
-def _log_event_history_to_file(settings, config, lock_history, active_history_period,
table_events, event, what, who, addinfo):
+def _log_event_history_to_file(settings, config, logger, lock_history,
active_history_period, table_events, event, what, who, addinfo):
+ _log_event(config, logger, event, what, who, addinfo)
with lock_history:
columns = [
str(time.time()),
@@ -331,7 +368,7 @@ def _current_history_period(config):
# Delete old log files
-def expire_logfiles(settings, config, logger, lock_history, flush):
+def _expire_logfiles(settings, config, logger, lock_history, flush):
with lock_history:
try:
days = config["history_lifetime"]
@@ -349,14 +386,7 @@ def expire_logfiles(settings, config, logger, lock_history, flush):
logger.exception("Error expiring log files: %s" % e)
-def flush_event_history(settings, config, logger, lock_history, mongodb):
- if config['archive_mode'] == 'mongodb':
- _flush_event_history_mongodb(mongodb)
- else:
- expire_logfiles(settings, config, logger, lock_history, True)
-
-
-def get_event_history_from_file(settings, table_history, query, logger):
+def _get_event_history_from_file(settings, table_history, query, logger):
filters, limit = query.filters, query.limit
history_entries = []
if not settings.paths.history_dir.value.exists():
diff --git a/cmk/ec/main.py b/cmk/ec/main.py
index 13deb79..37e1a71 100644
--- a/cmk/ec/main.py
+++ b/cmk/ec/main.py
@@ -1065,9 +1065,7 @@ class EventServer(ECServerThread):
self.hk_handle_event_timeouts()
self.hk_check_expected_messages()
self.hk_cleanup_downtime_events()
-
- if self._config['archive_mode'] != 'mongodb':
- cmk.ec.history.expire_logfiles(self.settings, self._config, self._logger,
self._lock_history, False)
+ cmk.ec.history.history_housekeeping(self.settings, self._config, self._logger,
self._lock_history)
# For all events that have been created in a host downtime check the host
# whether or not it is still in downtime. In case the downtime has ended
@@ -2752,10 +2750,7 @@ class StatusTableHistory(StatusTable):
self._logger = logger
def _enumerate(self, query):
- if self._config['archive_mode'] == 'mongodb':
- return cmk.ec.history.get_event_history_from_mongodb(self.settings,
self._table_events, query, self._mongodb)
- else:
- return cmk.ec.history.get_event_history_from_file(self.settings, self, query,
self._logger)
+ return cmk.ec.history.get_event_history(self.settings, self._config, self._mongodb,
self._table_events, self, self._logger, query)
class StatusTableRules(StatusTable):
@@ -3897,10 +3892,7 @@ def load_configuration(settings, slave_status, mongodb, logger):
logger.getChild("StatusServer").setLevel(levels["cmk.mkeventd.StatusServer"])
logger.getChild("lock").setLevel(levels["cmk.mkeventd.lock"])
- # Configure the auto deleting indexes in the DB when mongodb is enabled
- if config['archive_mode'] == 'mongodb':
- cmk.ec.history.update_mongodb_indexes(settings, mongodb)
- cmk.ec.history.update_mongodb_history_lifetime(settings, config, mongodb)
+ cmk.ec.history.configure_event_history(settings, config, mongodb)
# Are we a replication slave? Parts of the configuration
# will be overridden by values from the master.