Module: check_mk
Branch: master
Commit: c7c9da4723030d2a120caa6a5c77851738e49bc9
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=c7c9da4723030d…
Author: Mathias Kettner <mk(a)mathias-kettner.de>
Date: Mon Dec 20 20:32:51 2010 +0100
Updated internal doku
---
LIESMICH.zutun | 10 ++++++++++
1 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/LIESMICH.zutun b/LIESMICH.zutun
index dab0a6a..b8cd8af 100644
--- a/LIESMICH.zutun
+++ b/LIESMICH.zutun
@@ -22,6 +22,16 @@ Ein sinnvoller Default-Wert wäre aber schon gut.
--------------------------------------------------------------------------------
BUGS beheben ab 1.1.9i1
--------------------------------------------------------------------------------
+Multisite: Endgültig kompatibel mit IE machen. Bekannte Probleme:
+* reschedule Knopf geht irgendwie gar nicht. Weder Animation noch reschedule.
+  Nur das reload macht er
+* Bug mit Verschieben der Sidebar wo das Tabellengerüst stehen bleibt
+* Snapin "Views": Die Spiegelpunkte sind zu weit links, die Einrückung
+  klappt nicht.
+
+Multisite: Kompatibel mit Chrome machen. Probleme:
+* reschedule Knopf wird nicht animiert
+
WATO: Wenn ein Agent einen rauswirft (leere Ausgabe, Connection closed), kommt im
WATO bei der Inventur keine sinnvolle Fehlermeldung.
Module: check_mk
Branch: master
Commit: c035d82729704e2b2e3cad430e92c736f7bd203d
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=c035d82729704e…
Author: Mathias Kettner <mk(a)mathias-kettner.de>
Date: Mon Dec 20 17:13:08 2010 +0100
printer_supply: handle case where no current available
---
ChangeLog | 1 +
checks/printer_supply | 2 +-
2 files changed, 2 insertions(+), 1 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 6df57d9..f47fa0b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -35,6 +35,7 @@
* Fixed problem with OnlyFrom: in Linux agent (df didn't work properly)
* cups_queues: fixed plugin error due to invalid import of datetime,
converted other checks from 'from datetime import...' to 'import datetime'.
+ * printer_supply: handle the case where the current value is missing
Livestatus:
* new column pnpgraph_present in table host and service. In order for this
diff --git a/checks/printer_supply b/checks/printer_supply
index a6df030..ed387fc 100644
--- a/checks/printer_supply
+++ b/checks/printer_supply
@@ -79,7 +79,7 @@ def check_printer_supply(item, params, info):
for line in printer_supply_fix_infos(info):
if line[0] == item:
maxlevel = float(line[1])
- current = float(line[2])
+ current = savefloat(line[2])
leftperc = 100.0 * current / maxlevel
warn, crit = params # in percent
infotext = "%.0f%% (levels at %.0f%% / %.0f%%)" % (leftperc, warn, crit)
Module: check_mk
Branch: master
Commit: 5be217725b7c213cc1a3c975b928a12027d4d23e
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=5be217725b7c21…
Author: Mathias Kettner <mk(a)mathias-kettner.de>
Date: Mon Dec 20 16:12:00 2010 +0100
Livestatus: use Columns: instead of StatsGroupBy:
StatsGroupBy: is now deprecated. Columns: does the same
thing for you.
---
ChangeLog | 3 +++
livestatus/src/Query.cc | 31 ++++++++-----------------------
livestatus/src/Query.h | 3 ---
3 files changed, 11 insertions(+), 26 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 15fe292..ccef56e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -4,6 +4,9 @@
* Multisite: removed (undocumented) view parameters show_buttons and show_controls.
Please use display_options instead.
* Finally removed deprecated filesystem_levels. Please use check_parameters instead.
+ * Livestatus: The StatsGroupBy: header is still working but now deprecated.
+ Please simply use Columns: instead. If your query contains at least one Stats:
+ header then Columns: has the meaning of the old StatsGroupBy: header
Core, Setup, etc.:
* Create alias 'cmk' for check_mk in bin/ (easier typing)
diff --git a/livestatus/src/Query.cc b/livestatus/src/Query.cc
index c9a3542..877b765 100644
--- a/livestatus/src/Query.cc
+++ b/livestatus/src/Query.cc
@@ -177,12 +177,12 @@ Column *Query::createDummyColumn(const char *name)
}
-
void Query::addColumn(Column *column)
{
_columns.push_back(column);
}
+
bool Query::hasNoColumns()
{
return _columns.size() == 0 && !doStats();
@@ -419,23 +419,8 @@ void Query::parseAuthUserHeader(char *line)
void Query::parseStatsGroupLine(char *line)
{
- if (!_table)
- return;
-
- char *column_name;
- while (column_name = next_field(&line)) {
- Column *column = _table->column(column_name);
- if (!column) {
- _output->setError(RESPONSE_CODE_INVALID_HEADER, "StatsGroupBy: unknown column '%s'", column_name);
- return;
- }
- _stats_group_columns.push_back(column);
- }
-
- if (_stats_group_columns.size() == 0) {
- _output->setError(RESPONSE_CODE_INVALID_HEADER, "StatsGroupBy: missing an argument");
- return;
- }
+ logger(LOG_WARNING, "Warning: StatsGroupBy is deprecated. Please use Columns instead.");
+ parseColumnsLine(line);
}
@@ -645,7 +630,7 @@ void Query::start()
// if we have no StatsGroupBy: column, we allocate one only row of Aggregators,
// directly in _stats_aggregators. When grouping the rows of aggregators
// will be created each time a new group is found.
- if (_stats_group_columns.size() == 0)
+ if (_columns.size() == 0)
{
_stats_aggregators = new Aggregator *[_stats_columns.size()];
for (unsigned i=0; i<_stats_columns.size(); i++)
@@ -691,7 +676,7 @@ bool Query::processDataset(void *data)
Aggregator **aggr;
// When doing grouped stats, we need to fetch/create a row
// of aggregators for the current group
- if (_stats_group_columns.size() > 0) {
+ if (_columns.size() > 0) {
_stats_group_spec_t groupspec;
computeStatsGroupSpec(groupspec, data);
aggr = getStatsGroup(groupspec);
@@ -732,7 +717,7 @@ bool Query::processDataset(void *data)
void Query::finish()
{
// grouped stats
- if (doStats() && _stats_group_columns.size() > 0)
+ if (doStats() && _columns.size() > 0)
{
// output values of all stats groups (output has been postponed until now)
for (_stats_groups_t::iterator it = _stats_groups.begin();
@@ -984,8 +969,8 @@ Aggregator **Query::getStatsGroup(Query::_stats_group_spec_t &groupspec)
void Query::computeStatsGroupSpec(Query::_stats_group_spec_t &groupspec, void *data)
{
- for (_stats_group_columns_t::iterator it = _stats_group_columns.begin();
- it != _stats_group_columns.end();
+ for (_columns_t::iterator it = _columns.begin();
+ it != _columns.end();
++it)
{
Column *column = *it;
diff --git a/livestatus/src/Query.h b/livestatus/src/Query.h
index 5cc1955..1a85b63 100644
--- a/livestatus/src/Query.h
+++ b/livestatus/src/Query.h
@@ -87,9 +87,6 @@ class Query
_stats_columns_t _stats_columns; // must also delete
Aggregator **_stats_aggregators;
- typedef vector <Column *> _stats_group_columns_t;
- _stats_group_columns_t _stats_group_columns;
-
typedef vector<string> _stats_group_spec_t;
typedef map<_stats_group_spec_t, Aggregator **> _stats_groups_t;
_stats_groups_t _stats_groups;
Module: check_mk
Branch: master
Commit: 1822486a776386bff491ba50644d5b5e3effe9c4
URL: http://git.mathias-kettner.de/git/?p=check_mk.git;a=commit;h=1822486a776386…
Author: Mathias Kettner <mk(a)mathias-kettner.de>
Date: Mon Dec 20 15:57:27 2010 +0100
Livestatus: StatsGroupBy: allows more than one columns
---
ChangeLog | 1 +
LIESMICH.zutun | 9 +---
livestatus/src/Query.cc | 110 ++++++++++++++++++++++++++++++----------------
livestatus/src/Query.h | 11 +++-
4 files changed, 82 insertions(+), 49 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index d60c93d..15fe292 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -34,6 +34,7 @@
* new column pnpgraph_present in table host and service. In order for this
column to work you need to specify the base directory of the PNP graphs
with the module option pnp_path=, e.g. pnp_path=/omd/sites/wato/var/pnp4nagios/perfdata
+ * Allow more than one column for StatsGroupBy:
1.1.9i2:
Checks & Agents:
diff --git a/LIESMICH.zutun b/LIESMICH.zutun
index ecbbb5d..b749764 100644
--- a/LIESMICH.zutun
+++ b/LIESMICH.zutun
@@ -71,10 +71,6 @@ inventurisiert werden, und darf dann die betroffenen Services wegwerfen.
Cluster-Inventur(2): Wenn ein Service auf der Node sitzt und man die Regeln
zu Clustered services ändert, entfernt ein -II das nicht von der node.
-Livestatus: wenn man mehrere StatsGroupBy macht, wird der Speicher von den
-alten nicht freigegeben (ist momentan eh nicht erlaubt. Fehlermeldung ausgeben).
-In neuer Version sollte man irgendwann mehrere Group-Header erlauben.
-
Multisite: Table log: Hier wird nur der erste Site angezeigt, bzw. die Chronologie
Reihenfolge stimmt nicht. Sortieren nach Sekunden plus Zeilennummer.
@@ -462,12 +458,9 @@ mit dem Windows-Check erstellen (wo ein Graph eh fehlt).
Multisite: Quicksearch evtl. case-insensitive machen?
-Livestatus: StatsGroupBy: mehrere Spalten erlauben
-
Multisite: Die TOP-25 Alerts: Welche Dinge hosts/services hatten in einem
Zeitraum die meisten Alerts. Das geht mit log und StatsGroupBy: host_name
-service_description. Dazu muss ich allerdings zuerst eine Gruppierung nach
-mehr als einem Feld einbauen.
+service_description.
Multisite: Filter, der Hosts zeigt, die entweder selbst Summary hosts sind
oder keinen haben.
diff --git a/livestatus/src/Query.cc b/livestatus/src/Query.cc
index 6e7a130..c9a3542 100644
--- a/livestatus/src/Query.cc
+++ b/livestatus/src/Query.cc
@@ -62,8 +62,7 @@ Query::Query(InputBuffer *input, OutputBuffer *output, Table *table) :
_output_format(OUTPUT_FORMAT_CSV),
_limit(-1),
_current_line(0),
- _timezone_offset(0),
- _stats_group_column(0)
+ _timezone_offset(0)
{
while (input->moreLines())
{
@@ -151,6 +150,8 @@ Query::Query(InputBuffer *input, OutputBuffer *output, Table *table) :
Query::~Query()
{
+ return;
+
// delete dummy-columns
for (_columns_t::iterator it = _dummy_columns.begin();
it != _dummy_columns.end();
@@ -418,22 +419,23 @@ void Query::parseAuthUserHeader(char *line)
void Query::parseStatsGroupLine(char *line)
{
- if (!_table)
- return;
-
- char *column_name = next_field(&line);
- if (!column_name) {
- _output->setError(RESPONSE_CODE_INVALID_HEADER, "StatsGroupBy: missing an argument");
- return;
- }
+ if (!_table)
+ return;
- Column *column = _table->column(column_name);
- if (!column) {
- _output->setError(RESPONSE_CODE_INVALID_HEADER, "StatsGroupBy: unknown column '%s'", column_name);
- return;
- }
+ char *column_name;
+ while (column_name = next_field(&line)) {
+ Column *column = _table->column(column_name);
+ if (!column) {
+ _output->setError(RESPONSE_CODE_INVALID_HEADER, "StatsGroupBy: unknown column '%s'", column_name);
+ return;
+ }
+ _stats_group_columns.push_back(column);
+ }
- _stats_group_column = column;
+ if (_stats_group_columns.size() == 0) {
+ _output->setError(RESPONSE_CODE_INVALID_HEADER, "StatsGroupBy: missing an argument");
+ return;
+ }
}
@@ -628,6 +630,7 @@ bool Query::doStats()
return _stats_columns.size() > 0;
}
+
void Query::start()
{
doWait();
@@ -642,7 +645,7 @@ void Query::start()
// if we have no StatsGroupBy: column, we allocate one only row of Aggregators,
// directly in _stats_aggregators. When grouping the rows of aggregators
// will be created each time a new group is found.
- if (!_stats_group_column)
+ if (_stats_group_columns.size() == 0)
{
_stats_aggregators = new Aggregator *[_stats_columns.size()];
for (unsigned i=0; i<_stats_columns.size(); i++)
@@ -685,21 +688,22 @@ bool Query::processDataset(void *data)
if (doStats())
{
- Aggregator **aggr;
- // When doing grouped stats, we need to fetch/create a row
- // of aggregators for the current group
- if (_stats_group_column) {
- string groupname = _stats_group_column->valueAsString(data, this);
- aggr = getStatsGroup(groupname);
- }
- else
- aggr = _stats_aggregators;
-
- for (unsigned i=0; i<_stats_columns.size(); i++)
- aggr[i]->consume(data, this);
-
- // No output is done while processing the data, we only
- // collect stats.
+ Aggregator **aggr;
+ // When doing grouped stats, we need to fetch/create a row
+ // of aggregators for the current group
+ if (_stats_group_columns.size() > 0) {
+ _stats_group_spec_t groupspec;
+ computeStatsGroupSpec(groupspec, data);
+ aggr = getStatsGroup(groupspec);
+ }
+ else
+ aggr = _stats_aggregators;
+
+ for (unsigned i=0; i<_stats_columns.size(); i++)
+ aggr[i]->consume(data, this);
+
+ // No output is done while processing the data, we only
+ // collect stats.
}
else
{
@@ -728,16 +732,32 @@ bool Query::processDataset(void *data)
void Query::finish()
{
// grouped stats
- if (doStats() && _stats_group_column)
+ if (doStats() && _stats_group_columns.size() > 0)
{
+ // output values of all stats groups (output has been postponed until now)
for (_stats_groups_t::iterator it = _stats_groups.begin();
it != _stats_groups.end();
++it)
{
+ // dataset separator after first group
if (it != _stats_groups.begin() && _output_format != OUTPUT_FORMAT_CSV)
_output->addBuffer(",\n", 2);
+
outputDatasetBegin();
- outputString(it->first.c_str());
+
+ // output group columns first
+ _stats_group_spec_t groupspec = it->first;
+ bool first = true;
+ for (_stats_group_spec_t::iterator iit = groupspec.begin();
+ iit != groupspec.end();
+ ++iit)
+ {
+ if (!first)
+ outputFieldSeparator();
+ else
+ first = false;
+ outputString((*iit).c_str());
+ }
Aggregator **aggr = it->second;
for (unsigned i=0; i<_stats_columns.size(); i++) {
@@ -818,7 +838,7 @@ void Query::outputInteger(int32_t value)
void Query::outputInteger64(int64_t value)
{
char buf[32];
- int l = snprintf(buf, 32, "%lld", value);
+ int l = snprintf(buf, 32, "%lld", (long long int )value);
_output->addBuffer(buf, l);
}
@@ -948,20 +968,32 @@ void Query::outputEndSublist()
_output->addChar(']');
}
-Aggregator **Query::getStatsGroup(string name)
+Aggregator **Query::getStatsGroup(Query::_stats_group_spec_t &groupspec)
{
- _stats_groups_t::iterator it = _stats_groups.find(name);
+ _stats_groups_t::iterator it = _stats_groups.find(groupspec);
if (it == _stats_groups.end()) {
Aggregator **aggr = new Aggregator *[_stats_columns.size()];
for (unsigned i=0; i<_stats_columns.size(); i++)
aggr[i] = _stats_columns[i]->createAggregator();
- _stats_groups.insert(make_pair(string(name), aggr));
+ _stats_groups.insert(make_pair(groupspec, aggr));
return aggr;
}
else
return it->second;
}
+void Query::computeStatsGroupSpec(Query::_stats_group_spec_t &groupspec, void *data)
+{
+ for (_stats_group_columns_t::iterator it = _stats_group_columns.begin();
+ it != _stats_group_columns.end();
+ ++it)
+ {
+ Column *column = *it;
+ groupspec.push_back(column->valueAsString(data, this));
+ }
+}
+
+
void Query::doWait()
{
// If no wait condition and no trigger is set,
@@ -1016,3 +1048,5 @@ void Query::doWait()
}
} while (!_wait_condition.accepts(_wait_object));
}
+
+
diff --git a/livestatus/src/Query.h b/livestatus/src/Query.h
index 49520e2..5cc1955 100644
--- a/livestatus/src/Query.h
+++ b/livestatus/src/Query.h
@@ -86,8 +86,12 @@ class Query
typedef vector<StatsColumn *> _stats_columns_t;
_stats_columns_t _stats_columns; // must also delete
Aggregator **_stats_aggregators;
- Column *_stats_group_column;
- typedef map<string, Aggregator **> _stats_groups_t;
+
+ typedef vector <Column *> _stats_group_columns_t;
+ _stats_group_columns_t _stats_group_columns;
+
+ typedef vector<string> _stats_group_spec_t;
+ typedef map<_stats_group_spec_t, Aggregator **> _stats_groups_t;
_stats_groups_t _stats_groups;
public:
@@ -128,7 +132,8 @@ public:
private:
bool doStats();
void doWait();
- Aggregator **getStatsGroup(string name);
+ Aggregator **getStatsGroup(_stats_group_spec_t &groupspec);
+ void computeStatsGroupSpec(_stats_group_spec_t &groupspec, void *data);
Filter *createFilter(Column *column, int operator_id, char *value);
void parseFilterLine(char *line, bool filter /* and not cond */);
void parseStatsLine(char *line);