commitdiff (parent: f2a35ce)
author    Florian Forster <octo@leeloo.lan.home.verplant.org>
          Sat, 10 Oct 2009 15:04:09 +0000 (17:04 +0200)
committer Florian Forster <octo@leeloo.lan.home.verplant.org>
          Sat, 10 Oct 2009 15:04:09 +0000 (17:04 +0200)
Files changed:
src/collectd.conf.in
src/collectd.conf.pod
src/network.c
src/types.db
diff --git a/src/collectd.conf.in b/src/collectd.conf.in
index 8d14f97393290f344cb3d54d16225720767b0548..11ae74856aa13dce422ee5b80256eabc88650a7e 100644 (file)
--- a/src/collectd.conf.in
+++ b/src/collectd.conf.in
# TimeToLive "128"
# Forward false
# CacheFlush 1800
+# ReportStats false
@LOAD_PLUGIN_NETWORK@</Plugin>
#<Plugin nginx>
diff --git a/src/collectd.conf.pod b/src/collectd.conf.pod
index 2458462a3617063d337cbed2dc9a4c83fb06cee3..3ea3d4dd588ab47ed1cd63e759f76ffa19dc0390 100644 (file)
--- a/src/collectd.conf.pod
+++ b/src/collectd.conf.pod
1800 seconds, but setting this to 86400 seconds (one day) will not do much harm
either.
+=item B<ReportStats> B<true>|B<false>
+
+The network plugin can not only receive and send statistics, it can also
+create statistics about itself. Collected data includes the number of received
+and sent octets and packets, the length of the receive queue and the number of
+values handled. When set to B<true>, the I<Network plugin> will make these
+statistics available. Defaults to B<false>.
+
=back
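As a usage sketch (not part of the patch itself), the new option would be switched on by adding one line to an existing <Plugin network> block in collectd.conf; the Server/Listen statement below is only a placeholder for whatever is already configured:

    <Plugin network>
      # Server "..." / Listen "..." statements as already configured
      ReportStats true
    </Plugin>

With the option enabled, network_stats_read() is registered as a read callback and dispatches if_octets and if_packets (octets and packets received/sent), total_values with the type instances dispatch-accepted, dispatch-rejected, send-accepted and send-rejected, and queue_length for the receive queue, all under the plugin name "network".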
=head2 Plugin C<nginx>
diff --git a/src/network.c b/src/network.c
index f4b87579dfda0c28b91ddb9a121b40103b478bf1..1b453753a4d0204f930afce574624a039d3e313e 100644 (file)
--- a/src/network.c
+++ b/src/network.c
static int network_config_ttl = 0;
static size_t network_config_packet_size = 1024;
static int network_config_forward = 0;
+static int network_config_stats = 0;
static sockent_t *sending_sockets = NULL;
static receive_list_entry_t *receive_list_tail = NULL;
static pthread_mutex_t receive_list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t receive_list_cond = PTHREAD_COND_INITIALIZER;
+static uint64_t receive_list_length = 0;
static sockent_t *listen_sockets = NULL;
static struct pollfd *listen_sockets_pollfd = NULL;
static value_list_t send_buffer_vl = VALUE_LIST_STATIC;
static pthread_mutex_t send_buffer_lock = PTHREAD_MUTEX_INITIALIZER;
+/* XXX: These counters are incremented from one place only. The spot in which
+ * the values are incremented is either reachable by only one thread (the
+ * dispatch thread, for example) or protected by some lock (send_buffer_lock,
+ * for example). Only if neither is true is stats_lock acquired. The counters
+ * are always read without holding a lock, in the hope that writing 8 bytes to
+ * memory is an atomic operation. */
+static uint64_t stats_octets_rx = 0;
+static uint64_t stats_octets_tx = 0;
+static uint64_t stats_packets_rx = 0;
+static uint64_t stats_packets_tx = 0;
+static uint64_t stats_values_dispatched = 0;
+static uint64_t stats_values_not_dispatched = 0;
+static uint64_t stats_values_sent = 0;
+static uint64_t stats_values_not_sent = 0;
+static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
+
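The comment above describes a small locking convention; a condensed, illustrative C sketch of that rule follows (the names here are made up for the example and do not appear in the patch; it assumes <stdint.h> and <pthread.h>):

    /* Illustrative sketch only -- not part of the patch. */
    static uint64_t single_writer_counter = 0; /* incremented by one thread only */
    static uint64_t multi_writer_counter = 0;  /* increment reachable from several threads */
    static pthread_mutex_t example_stats_lock = PTHREAD_MUTEX_INITIALIZER;

    static void count_single_writer_event (void)
    {
      /* Only one thread ever reaches this spot (or an existing lock such as
       * send_buffer_lock already serializes it), so no extra lock is taken. */
      single_writer_counter++;
    }

    static void count_multi_writer_event (void)
    {
      /* Several threads may reach this spot, so the increment is serialized. */
      pthread_mutex_lock (&example_stats_lock);
      multi_writer_counter++;
      pthread_mutex_unlock (&example_stats_lock);
    }

    static uint64_t read_counter_unlocked (const uint64_t *counter)
    {
      /* Reads take no lock, relying on 8-byte stores being effectively
       * atomic on the platforms in question. */
      return (*counter);
    }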
/*
* Private functions
*/
if (!check_receive_okay (vl))
{
#if COLLECT_DEBUG
- char name[6*DATA_MAX_NAME_LEN];
- FORMAT_VL (name, sizeof (name), vl);
- name[sizeof (name) - 1] = 0;
- DEBUG ("network plugin: network_dispatch_values: "
- "NOT dispatching %s.", name);
+ char name[6*DATA_MAX_NAME_LEN];
+ FORMAT_VL (name, sizeof (name), vl);
+ name[sizeof (name) - 1] = 0;
+ DEBUG ("network plugin: network_dispatch_values: "
+ "NOT dispatching %s.", name);
#endif
+ stats_values_not_dispatched++;
return (0);
}
}
plugin_dispatch_values (vl);
+ stats_values_dispatched++;
meta_data_destroy (vl->meta);
vl->meta = NULL;
ent = receive_list_head;
if (ent != NULL)
receive_list_head = ent->next;
+ receive_list_length--;
pthread_mutex_unlock (&receive_list_lock);
/* Check whether we are supposed to exit. We do NOT check `listen_loop'
receive_list_entry_t *private_list_head;
receive_list_entry_t *private_list_tail;
+ uint64_t private_list_length;
assert (listen_sockets_num > 0);
private_list_head = NULL;
private_list_tail = NULL;
+ private_list_length = 0;
while (listen_loop == 0)
{
return (-1);
}
+ stats_octets_rx += ((uint64_t) buffer_len);
+ stats_packets_rx++;
+
/* TODO: Possible performance enhancement: Do not free
* these entries in the dispatch thread but put them in
* another list, so we don't have to allocate more and
ent->data = malloc (network_config_packet_size);
if (ent->data == NULL)
{
+ sfree (ent);
ERROR ("network plugin: malloc failed.");
return (-1);
}
else
private_list_tail->next = ent;
private_list_tail = ent;
+ private_list_length++;
/* Do not block here. Blocking here has led to
* insufficient performance in the past. */
if (pthread_mutex_trylock (&receive_list_lock) == 0)
{
+ assert (((receive_list_head == NULL) && (receive_list_length == 0))
+ || ((receive_list_head != NULL) && (receive_list_length != 0)));
+
if (receive_list_head == NULL)
receive_list_head = private_list_head;
else
receive_list_tail->next = private_list_head;
receive_list_tail = private_list_tail;
-
- private_list_head = NULL;
- private_list_tail = NULL;
+ receive_list_length += private_list_length;
pthread_cond_signal (&receive_list_cond);
pthread_mutex_unlock (&receive_list_lock);
+
+ private_list_head = NULL;
+ private_list_tail = NULL;
+ private_list_length = 0;
}
} /* for (listen_sockets_pollfd) */
} /* while (listen_loop == 0) */
else
receive_list_tail->next = private_list_head;
receive_list_tail = private_list_tail;
+ receive_list_length += private_list_length;
private_list_head = NULL;
private_list_tail = NULL;
+ private_list_length = 0;
pthread_cond_signal (&receive_list_cond);
pthread_mutex_unlock (&receive_list_lock);
send_buffer_fill);
network_send_buffer (send_buffer, (size_t) send_buffer_fill);
+
+ stats_octets_tx += ((uint64_t) send_buffer_fill);
+ stats_packets_tx++;
+
network_init_buffer ();
}
DEBUG ("network plugin: network_write: "
"NOT sending %s.", name);
#endif
+ /* Counter is not protected by another lock and may be reached by
+ * multiple threads */
+ pthread_mutex_lock (&stats_lock);
+ stats_values_not_sent++;
+ pthread_mutex_unlock (&stats_lock);
return (0);
}
/* status == bytes added to the buffer */
send_buffer_fill += status;
send_buffer_ptr += status;
+
+ stats_values_sent++;
}
else
{
{
send_buffer_fill += status;
send_buffer_ptr += status;
+
+ stats_values_sent++;
}
}
network_config_set_buffer_size (child);
else if (strcasecmp ("Forward", child->key) == 0)
network_config_set_boolean (child, &network_config_forward);
+ else if (strcasecmp ("ReportStats", child->key) == 0)
+ network_config_set_boolean (child, &network_config_stats);
else if (strcasecmp ("CacheFlush", child->key) == 0)
/* no op for backwards compatibility only */;
else
return (0);
} /* int network_shutdown */
+static int network_stats_read (void) /* {{{ */
+{
+ uint64_t copy_octets_rx;
+ uint64_t copy_octets_tx;
+ uint64_t copy_packets_rx;
+ uint64_t copy_packets_tx;
+ uint64_t copy_values_dispatched;
+ uint64_t copy_values_not_dispatched;
+ uint64_t copy_values_sent;
+ uint64_t copy_values_not_sent;
+ uint64_t copy_receive_list_length;
+ value_list_t vl = VALUE_LIST_INIT;
+ value_t values[2];
+
+ copy_octets_rx = stats_octets_rx;
+ copy_octets_tx = stats_octets_tx;
+ copy_packets_rx = stats_packets_rx;
+ copy_packets_tx = stats_packets_tx;
+ copy_values_dispatched = stats_values_dispatched;
+ copy_values_not_dispatched = stats_values_not_dispatched;
+ copy_values_sent = stats_values_sent;
+ copy_values_not_sent = stats_values_not_sent;
+ copy_receive_list_length = receive_list_length;
+
+ /* Initialize `vl' */
+ vl.values = values;
+ vl.values_len = 2;
+ vl.time = 0;
+ vl.interval = interval_g;
+ sstrncpy (vl.host, hostname_g, sizeof (vl.host));
+ sstrncpy (vl.plugin, "network", sizeof (vl.plugin));
+
+ /* Octets received / sent */
+ vl.values[0].counter = (counter_t) copy_octets_rx;
+ vl.values[1].counter = (counter_t) copy_octets_tx;
+ sstrncpy (vl.type, "if_octets", sizeof (vl.type));
+ plugin_dispatch_values (&vl);
+
+ /* Packets received / sent */
+ vl.values[0].counter = (counter_t) copy_packets_rx;
+ vl.values[1].counter = (counter_t) copy_packets_tx;
+ sstrncpy (vl.type, "if_packets", sizeof (vl.type));
+ plugin_dispatch_values (&vl);
+
+ /* Values (not) dispatched and (not) sent */
+ sstrncpy (vl.type, "total_values", sizeof (vl.type));
+ vl.values_len = 1;
+
+ vl.values[0].derive = (derive_t) copy_values_dispatched;
+ sstrncpy (vl.type_instance, "dispatch-accepted",
+ sizeof (vl.type_instance));
+ plugin_dispatch_values (&vl);
+
+ vl.values[0].derive = (derive_t) copy_values_not_dispatched;
+ sstrncpy (vl.type_instance, "dispatch-rejected",
+ sizeof (vl.type_instance));
+ plugin_dispatch_values (&vl);
+
+ vl.values[0].derive = (derive_t) copy_values_sent;
+ sstrncpy (vl.type_instance, "send-accepted",
+ sizeof (vl.type_instance));
+ plugin_dispatch_values (&vl);
+
+ vl.values[0].derive = (derive_t) copy_values_not_sent;
+ sstrncpy (vl.type_instance, "send-rejected",
+ sizeof (vl.type_instance));
+ plugin_dispatch_values (&vl);
+
+ /* Receive queue length */
+ vl.values[0].gauge = (gauge_t) copy_receive_list_length;
+ sstrncpy (vl.type, "queue_length", sizeof (vl.type));
+ vl.type_instance[0] = 0;
+ plugin_dispatch_values (&vl);
+
+ return (0);
+} /* }}} int network_stats_read */
+
static int network_init (void)
{
static _Bool have_init = false;
gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);
#endif
+ if (network_config_stats != 0)
+ plugin_register_read ("network", network_stats_read);
+
plugin_register_shutdown ("network", network_shutdown);
send_buffer = malloc (network_config_packet_size);
diff --git a/src/types.db b/src/types.db
index 64c99e8e61c033280fb5bf3033e3cacfe9b64458..0225e0f6b8e6586bb44db3fb8d83baefbeb9eab7 100644 (file)
--- a/src/types.db
+++ b/src/types.db
time_offset seconds:GAUGE:-1000000:1000000
total_requests value:DERIVE:0:U
total_time_in_ms value:DERIVE:0:U
+total_values value:DERIVE:0:U
uptime value:GAUGE:0:4294967295
users users:GAUGE:0:65535
virt_cpu_total ns:COUNTER:0:256000000000