1 /**
2 * collectd - src/ceph.c
3 * Copyright (C) 2011 New Dream Network
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; only version 2 of the License is applicable.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 *
18 * Authors:
19 * Colin McCabe <cmccabe@alumni.cmu.edu>
20 * Dennis Zou <yunzou@cisco.com>
21 * Dan Ryder <daryder@cisco.com>
22 **/
24 #define _BSD_SOURCE
26 #include "collectd.h"
27 #include "common.h"
28 #include "plugin.h"
30 #include <arpa/inet.h>
31 #include <errno.h>
32 #include <fcntl.h>
33 #include <yajl/yajl_parse.h>
34 #if HAVE_YAJL_YAJL_VERSION_H
35 #include <yajl/yajl_version.h>
36 #endif
38 #include <limits.h>
39 #include <poll.h>
40 #include <stdint.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <strings.h>
45 #include <sys/socket.h>
46 #include <sys/time.h>
47 #include <sys/types.h>
48 #include <sys/un.h>
49 #include <unistd.h>
50 #include <math.h>
51 #include <inttypes.h>
53 #define RETRY_AVGCOUNT -1
55 #if defined(YAJL_MAJOR) && (YAJL_MAJOR > 1)
56 # define HAVE_YAJL_V2 1
57 #endif
59 #define RETRY_ON_EINTR(ret, expr) \
60 while(1) { \
61 ret = expr; \
62 if(ret >= 0) \
63 break; \
64 ret = -errno; \
65 if(ret != -EINTR) \
66 break; \
67 }
69 /** Timeout interval in seconds */
70 #define CEPH_TIMEOUT_INTERVAL 1
72 /** Maximum path length for a UNIX domain socket on this system */
73 #define UNIX_DOMAIN_SOCK_PATH_MAX (sizeof(((struct sockaddr_un*)0)->sun_path))
75 /** Yajl callback returns */
76 #define CEPH_CB_CONTINUE 1
77 #define CEPH_CB_ABORT 0
79 #if HAVE_YAJL_V2
80 typedef size_t yajl_len_t;
81 #else
82 typedef unsigned int yajl_len_t;
83 #endif
85 /** Number of types for ceph defined in types.db */
86 #define CEPH_DSET_TYPES_NUM 3
87 /** ceph types enum */
88 enum ceph_dset_type_d
89 {
90 DSET_LATENCY = 0,
91 DSET_BYTES = 1,
92 DSET_RATE = 2,
93 DSET_TYPE_UNFOUND = 1000
94 };
96 /** Valid types for ceph defined in types.db */
97 const char * ceph_dset_types [CEPH_DSET_TYPES_NUM] =
98 {"ceph_latency", "ceph_bytes", "ceph_rate"};
100 /******* ceph_daemon *******/
101 struct ceph_daemon
102 {
103 /** Version of the admin_socket interface */
104 uint32_t version;
105 /** daemon name **/
106 char name[DATA_MAX_NAME_LEN];
108 /** Path to the socket that we use to talk to the ceph daemon */
109 char asok_path[UNIX_DOMAIN_SOCK_PATH_MAX];
111 /** Number of counters */
112 int ds_num;
113 /** Track ds types */
114 uint32_t *ds_types;
115 /** Track ds names to match with types */
116 char **ds_names;
118 /**
119 * Keep track of last data for latency values so we can calculate rate
120 * since last poll.
121 */
122 struct last_data **last_poll_data;
123 /** index of last poll data */
124 int last_idx;
125 };
127 /******* JSON parsing *******/
128 typedef int (*node_handler_t)(void *, const char*, const char*);
130 /** Track state and handler while parsing JSON */
131 struct yajl_struct
132 {
133 node_handler_t handler;
134 void * handler_arg;
135 struct {
136 char key[DATA_MAX_NAME_LEN];
137 int key_len;
138 } state[YAJL_MAX_DEPTH];
139 int depth;
140 };
141 typedef struct yajl_struct yajl_struct;
143 enum perfcounter_type_d
144 {
145 PERFCOUNTER_LATENCY = 0x4, PERFCOUNTER_DERIVE = 0x8,
146 };
148 /** Give user option to use default (long run = since daemon started) avg */
149 static int long_run_latency_avg = 0;
151 /**
152 * Give user option to use default type for special cases -
153 * filestore.journal_wr_bytes is currently only metric here. Ceph reports the
154 * type as a sum/count pair and will calculate it the same as a latency value.
155 * All other "bytes" metrics (excluding the used/capacity bytes for the OSD)
156 * use the DERIVE type. Unless user specifies to use given type, convert this
157 * metric to use DERIVE.
158 */
159 static int convert_special_metrics = 1;
161 /** Array of daemons to monitor */
162 static struct ceph_daemon **g_daemons = NULL;
164 /** Number of elements in g_daemons */
165 static int g_num_daemons = 0;
167 /**
168 * A set of data that we build up in memory while parsing the JSON.
169 */
170 struct values_tmp
171 {
172 /** ceph daemon we are processing data for*/
173 struct ceph_daemon *d;
174 /** track avgcount across counters for avgcount/sum latency pairs */
175 uint64_t avgcount;
176 /** current index of counters - used to get type of counter */
177 int index;
178 /** do we already have an avgcount for latency pair */
179 int avgcount_exists;
180 /**
181 * similar to index, but current index of latency type counters -
182 * used to get last poll data of counter
183 */
184 int latency_index;
185 /**
186 * values list - maintain across counters since
187 * host/plugin/plugin instance are always the same
188 */
189 value_list_t vlist;
190 };
192 /**
193 * A set of count/sum pairs to keep track of latency types and get difference
194 * between this poll data and last poll data.
195 */
196 struct last_data
197 {
198 char ds_name[DATA_MAX_NAME_LEN];
199 double last_sum;
200 uint64_t last_count;
201 };
203 /******* network I/O *******/
204 enum cstate_t
205 {
206 CSTATE_UNCONNECTED = 0,
207 CSTATE_WRITE_REQUEST,
208 CSTATE_READ_VERSION,
209 CSTATE_READ_AMT,
210 CSTATE_READ_JSON,
211 };
213 enum request_type_t
214 {
215 ASOK_REQ_VERSION = 0,
216 ASOK_REQ_DATA = 1,
217 ASOK_REQ_SCHEMA = 2,
218 ASOK_REQ_NONE = 1000,
219 };
221 struct cconn
222 {
223 /** The Ceph daemon that we're talking to */
224 struct ceph_daemon *d;
226 /** Request type */
227 uint32_t request_type;
229 /** The connection state */
230 enum cstate_t state;
232 /** The socket we use to talk to this daemon */
233 int asok;
235 /** The amount of data remaining to read / write. */
236 uint32_t amt;
238 /** Length of the JSON to read */
239 uint32_t json_len;
241 /** Buffer containing JSON data */
242 unsigned char *json;
244 /** Keep data important to yajl processing */
245 struct yajl_struct yajl;
246 };
248 static int ceph_cb_null(void *ctx)
249 {
250 return CEPH_CB_CONTINUE;
251 }
253 static int ceph_cb_boolean(void *ctx, int bool_val)
254 {
255 return CEPH_CB_CONTINUE;
256 }
258 static int
259 ceph_cb_number(void *ctx, const char *number_val, yajl_len_t number_len)
260 {
261 yajl_struct *yajl = (yajl_struct*)ctx;
262 char buffer[number_len+1];
263 int i, latency_type = 0, result;
264 char key[128];
266 memcpy(buffer, number_val, number_len);
267 buffer[sizeof(buffer) - 1] = 0;
269 ssnprintf(key, yajl->state[0].key_len, "%s", yajl->state[0].key);
270 for(i = 1; i < yajl->depth; i++)
271 {
272 if((i == yajl->depth-1) && ((strcmp(yajl->state[i].key,"avgcount") == 0)
273 || (strcmp(yajl->state[i].key,"sum") == 0)))
274 {
275 if(convert_special_metrics)
276 {
277 /**
278 * Special case for filestore:JournalWrBytes. For some reason,
279 * Ceph schema encodes this as a count/sum pair while all
280 * other "Bytes" data (excluding used/capacity bytes for OSD
281 * space) uses a single "Derive" type. To spare further
282 * confusion, keep this KPI as the same type of other "Bytes".
283 * Instead of keeping an "average" or "rate", use the "sum" in
284 * the pair and assign that to the derive value.
285 */
286 if((strcmp(yajl->state[i-1].key, "journal_wr_bytes") == 0) &&
287 (strcmp(yajl->state[i-2].key,"filestore") == 0) &&
288 (strcmp(yajl->state[i].key,"avgcount") == 0))
289 {
290 DEBUG("ceph plugin: Skipping avgcount for filestore.JournalWrBytes");
291 yajl->depth = (yajl->depth - 1);
292 return CEPH_CB_CONTINUE;
293 }
294 }
295 //probably a avgcount/sum pair. if not - we'll try full key later
296 latency_type = 1;
297 break;
298 }
299 strncat(key, ".", 1);
300 strncat(key, yajl->state[i].key, yajl->state[i].key_len+1);
301 }
303 result = yajl->handler(yajl->handler_arg, buffer, key);
305 if((result == RETRY_AVGCOUNT) && latency_type)
306 {
307 strncat(key, ".", 1);
308 strncat(key, yajl->state[yajl->depth-1].key,
309 yajl->state[yajl->depth-1].key_len+1);
310 result = yajl->handler(yajl->handler_arg, buffer, key);
311 }
313 if(result == -ENOMEM)
314 {
315 ERROR("ceph plugin: memory allocation failed");
316 return CEPH_CB_ABORT;
317 }
319 yajl->depth = (yajl->depth - 1);
320 return CEPH_CB_CONTINUE;
321 }
323 static int ceph_cb_string(void *ctx, const unsigned char *string_val,
324 yajl_len_t string_len)
325 {
326 return CEPH_CB_CONTINUE;
327 }
329 static int ceph_cb_start_map(void *ctx)
330 {
331 return CEPH_CB_CONTINUE;
332 }
334 static int
335 ceph_cb_map_key(void *ctx, const unsigned char *key, yajl_len_t string_len)
336 {
337 yajl_struct *yajl = (yajl_struct*)ctx;
339 if((yajl->depth+1) >= YAJL_MAX_DEPTH)
340 {
341 ERROR("ceph plugin: depth exceeds max, aborting.");
342 return CEPH_CB_ABORT;
343 }
345 char buffer[string_len+1];
347 memcpy(buffer, key, string_len);
348 buffer[sizeof(buffer) - 1] = 0;
350 snprintf(yajl->state[yajl->depth].key, sizeof(buffer), "%s", buffer);
351 yajl->state[yajl->depth].key_len = sizeof(buffer);
352 yajl->depth = (yajl->depth + 1);
354 return CEPH_CB_CONTINUE;
355 }
357 static int ceph_cb_end_map(void *ctx)
358 {
359 yajl_struct *yajl = (yajl_struct*)ctx;
361 yajl->depth = (yajl->depth - 1);
362 return CEPH_CB_CONTINUE;
363 }
365 static int ceph_cb_start_array(void *ctx)
366 {
367 return CEPH_CB_CONTINUE;
368 }
370 static int ceph_cb_end_array(void *ctx)
371 {
372 return CEPH_CB_CONTINUE;
373 }
/**
 * Callback table handed to yajl_alloc(). The two NULL slots (yajl_integer
 * and yajl_double) make yajl fall back to the yajl_number callback, so all
 * numeric values arrive in ceph_cb_number as raw strings.
 */
static yajl_callbacks callbacks = {
        ceph_cb_null,
        ceph_cb_boolean,
        NULL,                 /* yajl_integer — unused, see yajl_number */
        NULL,                 /* yajl_double  — unused, see yajl_number */
        ceph_cb_number,
        ceph_cb_string,
        ceph_cb_start_map,
        ceph_cb_map_key,
        ceph_cb_end_map,
        ceph_cb_start_array,
        ceph_cb_end_array
};
389 static void ceph_daemon_print(const struct ceph_daemon *d)
390 {
391 DEBUG("ceph plugin: name=%s, asok_path=%s", d->name, d->asok_path);
392 }
394 static void ceph_daemons_print(void)
395 {
396 int i;
397 for(i = 0; i < g_num_daemons; ++i)
398 {
399 ceph_daemon_print(g_daemons[i]);
400 }
401 }
403 static void ceph_daemon_free(struct ceph_daemon *d)
404 {
405 int i = 0;
406 for(; i < d->last_idx; i++)
407 {
408 sfree(d->last_poll_data[i]);
409 }
410 sfree(d->last_poll_data);
411 d->last_poll_data = NULL;
412 d->last_idx = 0;
413 for(i = 0; i < d->ds_num; i++)
414 {
415 sfree(d->ds_names[i]);
416 }
417 sfree(d->ds_types);
418 sfree(d->ds_names);
419 sfree(d);
420 }
/**
 * Compact ds name by removing special characters and trimming length to
 * DATA_MAX_NAME_LEN if necessary.
 *
 * Splits `source` on ":_-+", capitalizes the first letter of each part,
 * concatenates them, and — if the original name ended in '-'/'+' or had to
 * be truncated — appends "Minus"/"Plus" and/or the original length so that
 * distinct counters stay distinct after compaction.
 *
 * `dest` must point at a zeroed buffer of at least DATA_MAX_NAME_LEN bytes
 * (the dest[0] != '\0' guard rejects non-empty buffers). `source` is
 * modified in place by strtok_r.
 */
static void compact_ds_name(char *source, char *dest)
{
    int keys_num = 0, i;
    char *save_ptr = NULL, *tmp_ptr = source;
    /* at most 16 separated parts are kept; the rest are dropped */
    char *keys[16];
    /* decimal length of source (<= 2 digits; callers cap input at 100) */
    char len_str[3];
    char tmp[DATA_MAX_NAME_LEN];
    size_t key_chars_remaining = (DATA_MAX_NAME_LEN-1);
    /* bytes reserved at the end of dest for the suffixes appended below */
    int reserved = 0;
    int offset = 0;
    memset(tmp, 0, sizeof(tmp));
    if(source == NULL || dest == NULL || source[0] == '\0' || dest[0] != '\0')
    {
        return;
    }
    size_t src_len = strlen(source);
    snprintf(len_str, sizeof(len_str), "%zu", src_len);
    /* append_status bits: 0x1 = name ended in '-', 0x2 = ended in '+',
     * 0x4 (set below) = name longer than DATA_MAX_NAME_LEN-1 */
    unsigned char append_status = 0x0;
    append_status |= (source[src_len - 1] == '-') ? 0x1 : 0x0;
    append_status |= (source[src_len - 1] == '+') ? 0x2 : 0x0;
    while ((keys[keys_num] = strtok_r(tmp_ptr, ":_-+", &save_ptr)) != NULL)
    {
        tmp_ptr = NULL;
        /** capitalize 1st char **/
        keys[keys_num][0] = toupper(keys[keys_num][0]);
        keys_num++;
        if(keys_num >= 16)
        {
            break;
        }
    }
    /** concatenate each part of source string **/
    /* NOTE(review): key_chars_remaining is size_t and is decremented by the
     * full strlen of each part even when strncat stored fewer bytes — for
     * inputs near the limit this can wrap to a huge value. Inputs are capped
     * at 100 chars by parse_keys, so this looks unreachable, but confirm
     * before reusing this helper elsewhere. */
    for(i = 0; i < keys_num; i++)
    {
        strncat(tmp, keys[i], key_chars_remaining);
        key_chars_remaining -= strlen(keys[i]);
    }
    tmp[DATA_MAX_NAME_LEN - 1] = '\0';
    /** to coordinate limitation of length of type_instance
     * we will truncate ds_name
     * when the its length is more than
     * DATA_MAX_NAME_LEN
     */
    if(strlen(tmp) > DATA_MAX_NAME_LEN - 1)
    {
        append_status |= 0x4;
        /** we should reserve space for
         * len_str
         */
        reserved += 2;
    }
    if(append_status & 0x1)
    {
        /** we should reserve space for
         * "Minus"
         */
        reserved += 5;
    }
    if(append_status & 0x2)
    {
        /** we should reserve space for
         * "Plus"
         */
        reserved += 4;
    }
    snprintf(dest, DATA_MAX_NAME_LEN - reserved, "%s", tmp);
    offset = strlen(dest);
    /* Append the suffixes encoded in append_status. The memcpy lengths rely
     * on dest being zero-filled past the copied bytes for termination
     * (cases 0x4/0x5/0x6 copy len_str without its NUL) — callers pass
     * memset buffers, so this holds for current call sites. */
    switch (append_status)
    {
        case 0x1:
            memcpy(dest + offset, "Minus", 5);
            break;
        case 0x2:
            memcpy(dest + offset, "Plus", 5);
            break;
        case 0x4:
            memcpy(dest + offset, len_str, 2);
            break;
        case 0x5:
            memcpy(dest + offset, "Minus", 5);
            memcpy(dest + offset + 5, len_str, 2);
            break;
        case 0x6:
            memcpy(dest + offset, "Plus", 4);
            memcpy(dest + offset + 4, len_str, 2);
            break;
        default:
            break;
    }
}
/**
 * Parse key to remove trailing ".type" (schema keys only) and initiate
 * compaction via compact_ds_name().
 *
 * @param key_str  dotted counter key, e.g. "filestore.journal_wr_bytes.type"
 * @param ds_name  out buffer (>= DATA_MAX_NAME_LEN, zeroed); receives the
 *                 compacted name
 * @return 0 on success, -1 on bad arguments.
 *
 * Fix vs. previous version: copies were done with
 * memcpy(tmp_ds_name, key_str, max_str_len - 1), reading 99 bytes from
 * key_str regardless of its real length — an out-of-bounds read for any
 * shorter key. Bounded snprintf copies stop at the NUL instead.
 */
static int parse_keys(const char *key_str, char *ds_name)
{
    char *ptr, *rptr;
    size_t ds_name_len = 0;
    /**
     * allow up to 100 characters before compaction - compact_ds_name will not
     * allow more than DATA_MAX_NAME_LEN chars
     */
    int max_str_len = 100;
    char tmp_ds_name[max_str_len];
    memset(tmp_ds_name, 0, sizeof(tmp_ds_name));
    if(ds_name == NULL || key_str == NULL || key_str[0] == '\0' ||
            ds_name[0] != '\0')
    {
        return -1;
    }
    if((ptr = strchr(key_str, '.')) == NULL
            || (rptr = strrchr(key_str, '.')) == NULL)
    {
        /* single component: take the whole key */
        snprintf(tmp_ds_name, sizeof(tmp_ds_name), "%s", key_str);
        goto compact;
    }

    ds_name_len = (rptr - ptr) > max_str_len ? max_str_len : (rptr - ptr);
    if((ds_name_len == 0) || strncmp(rptr + 1, "type", 4))
    { /** copy whole key **/
        snprintf(tmp_ds_name, sizeof(tmp_ds_name), "%s", key_str);
    }
    else
    {/** more than two keys: drop the trailing ".type" component **/
        size_t prefix_len = (size_t)(rptr - key_str);
        if(prefix_len > (size_t)(max_str_len - 1))
        {
            prefix_len = (size_t)(max_str_len - 1);
        }
        memcpy(tmp_ds_name, key_str, prefix_len);
    }

compact: compact_ds_name(tmp_ds_name, ds_name);
    return 0;
}
558 /**
559 * while parsing ceph admin socket schema, save counter name and type for later
560 * data processing
561 */
562 static int ceph_daemon_add_ds_entry(struct ceph_daemon *d, const char *name,
563 int pc_type)
564 {
565 uint32_t type;
566 char ds_name[DATA_MAX_NAME_LEN];
567 memset(ds_name, 0, sizeof(ds_name));
569 if(convert_special_metrics)
570 {
571 /**
572 * Special case for filestore:JournalWrBytes. For some reason, Ceph
573 * schema encodes this as a count/sum pair while all other "Bytes" data
574 * (excluding used/capacity bytes for OSD space) uses a single "Derive"
575 * type. To spare further confusion, keep this KPI as the same type of
576 * other "Bytes". Instead of keeping an "average" or "rate", use the
577 * "sum" in the pair and assign that to the derive value.
578 */
579 if((strcmp(name,"filestore.journal_wr_bytes.type") == 0))
580 {
581 pc_type = 10;
582 }
583 }
585 d->ds_names = realloc(d->ds_names, sizeof(char *) * (d->ds_num + 1));
586 if(!d->ds_names)
587 {
588 return -ENOMEM;
589 }
591 d->ds_types = realloc(d->ds_types, sizeof(uint32_t) * (d->ds_num + 1));
592 if(!d->ds_types)
593 {
594 return -ENOMEM;
595 }
597 d->ds_names[d->ds_num] = malloc(sizeof(char) * DATA_MAX_NAME_LEN);
598 if(!d->ds_names[d->ds_num])
599 {
600 return -ENOMEM;
601 }
603 type = (pc_type & PERFCOUNTER_DERIVE) ? DSET_RATE :
604 ((pc_type & PERFCOUNTER_LATENCY) ? DSET_LATENCY : DSET_BYTES);
605 d->ds_types[d->ds_num] = type;
607 if(parse_keys(name, ds_name))
608 {
609 return 1;
610 }
612 sstrncpy(d->ds_names[d->ds_num], ds_name, DATA_MAX_NAME_LEN -1);
613 d->ds_num = (d->ds_num + 1);
615 return 0;
616 }
618 /******* ceph_config *******/
619 static int cc_handle_str(struct oconfig_item_s *item, char *dest, int dest_len)
620 {
621 const char *val;
622 if(item->values_num != 1)
623 {
624 return -ENOTSUP;
625 }
626 if(item->values[0].type != OCONFIG_TYPE_STRING)
627 {
628 return -ENOTSUP;
629 }
630 val = item->values[0].value.string;
631 if(snprintf(dest, dest_len, "%s", val) > (dest_len - 1))
632 {
633 ERROR("ceph plugin: configuration parameter '%s' is too long.\n",
634 item->key);
635 return -ENAMETOOLONG;
636 }
637 return 0;
638 }
640 static int cc_handle_bool(struct oconfig_item_s *item, int *dest)
641 {
642 if(item->values_num != 1)
643 {
644 return -ENOTSUP;
645 }
647 if(item->values[0].type != OCONFIG_TYPE_BOOLEAN)
648 {
649 return -ENOTSUP;
650 }
652 *dest = (item->values[0].value.boolean) ? 1 : 0;
653 return 0;
654 }
656 static int cc_add_daemon_config(oconfig_item_t *ci)
657 {
658 int ret, i;
659 struct ceph_daemon *array, *nd, cd;
660 memset(&cd, 0, sizeof(struct ceph_daemon));
662 if((ci->values_num != 1) || (ci->values[0].type != OCONFIG_TYPE_STRING))
663 {
664 WARNING("ceph plugin: `Daemon' blocks need exactly one string "
665 "argument.");
666 return (-1);
667 }
669 ret = cc_handle_str(ci, cd.name, DATA_MAX_NAME_LEN);
670 if(ret)
671 {
672 return ret;
673 }
675 for(i=0; i < ci->children_num; i++)
676 {
677 oconfig_item_t *child = ci->children + i;
679 if(strcasecmp("SocketPath", child->key) == 0)
680 {
681 ret = cc_handle_str(child, cd.asok_path, sizeof(cd.asok_path));
682 if(ret)
683 {
684 return ret;
685 }
686 }
687 else
688 {
689 WARNING("ceph plugin: ignoring unknown option %s", child->key);
690 }
691 }
692 if(cd.name[0] == '\0')
693 {
694 ERROR("ceph plugin: you must configure a daemon name.\n");
695 return -EINVAL;
696 }
697 else if(cd.asok_path[0] == '\0')
698 {
699 ERROR("ceph plugin(name=%s): you must configure an administrative "
700 "socket path.\n", cd.name);
701 return -EINVAL;
702 }
703 else if(!((cd.asok_path[0] == '/') ||
704 (cd.asok_path[0] == '.' && cd.asok_path[1] == '/')))
705 {
706 ERROR("ceph plugin(name=%s): administrative socket paths must begin "
707 "with '/' or './' Can't parse: '%s'\n", cd.name, cd.asok_path);
708 return -EINVAL;
709 }
711 array = realloc(g_daemons,
712 sizeof(struct ceph_daemon *) * (g_num_daemons + 1));
713 if(array == NULL)
714 {
715 /* The positive return value here indicates that this is a
716 * runtime error, not a configuration error. */
717 return ENOMEM;
718 }
719 g_daemons = (struct ceph_daemon**) array;
720 nd = malloc(sizeof(struct ceph_daemon));
721 if(!nd)
722 {
723 return ENOMEM;
724 }
725 memcpy(nd, &cd, sizeof(struct ceph_daemon));
726 g_daemons[g_num_daemons++] = nd;
727 return 0;
728 }
730 static int ceph_config(oconfig_item_t *ci)
731 {
732 int ret, i;
734 for(i = 0; i < ci->children_num; ++i)
735 {
736 oconfig_item_t *child = ci->children + i;
737 if(strcasecmp("Daemon", child->key) == 0)
738 {
739 ret = cc_add_daemon_config(child);
740 if(ret == ENOMEM)
741 {
742 ERROR("ceph plugin: Couldn't allocate memory");
743 return ret;
744 }
745 else if(ret)
746 {
747 //process other daemons and ignore this one
748 continue;
749 }
750 }
751 else if(strcasecmp("LongRunAvgLatency", child->key) == 0)
752 {
753 ret = cc_handle_bool(child, &long_run_latency_avg);
754 if(ret)
755 {
756 return ret;
757 }
758 }
759 else if(strcasecmp("ConvertSpecialMetricTypes", child->key) == 0)
760 {
761 ret = cc_handle_bool(child, &convert_special_metrics);
762 if(ret)
763 {
764 return ret;
765 }
766 }
767 else
768 {
769 WARNING("ceph plugin: ignoring unknown option %s", child->key);
770 }
771 }
772 return 0;
773 }
775 /**
776 * Parse JSON and get error message if present
777 */
778 static int
779 traverse_json(const unsigned char *json, uint32_t json_len, yajl_handle hand)
780 {
781 yajl_status status = yajl_parse(hand, json, json_len);
782 unsigned char *msg;
784 switch(status)
785 {
786 case yajl_status_error:
787 msg = yajl_get_error(hand, /* verbose = */ 1,
788 /* jsonText = */ (unsigned char *) json,
789 (unsigned int) json_len);
790 ERROR ("ceph plugin: yajl_parse failed: %s", msg);
791 yajl_free_error(hand, msg);
792 return 1;
793 case yajl_status_client_canceled:
794 return 1;
795 default:
796 return 0;
797 }
798 }
/**
 * Schema-pass handler: register one counter (name + perfcounter type) on
 * the daemon. `val` holds the type bitmask as decimal text.
 */
static int
node_handler_define_schema(void *arg, const char *val, const char *key)
{
    struct ceph_daemon *d = (struct ceph_daemon *) arg;

    return ceph_daemon_add_ds_entry(d, key, atoi(val));
}
812 /**
813 * Latency counter does not yet have an entry in last poll data - add it.
814 */
815 static int add_last(struct ceph_daemon *d, const char *ds_n, double cur_sum,
816 uint64_t cur_count)
817 {
818 d->last_poll_data[d->last_idx] = malloc(1 * sizeof(struct last_data));
819 if(!d->last_poll_data[d->last_idx])
820 {
821 return -ENOMEM;
822 }
823 sstrncpy(d->last_poll_data[d->last_idx]->ds_name,ds_n,
824 sizeof(d->last_poll_data[d->last_idx]->ds_name));
825 d->last_poll_data[d->last_idx]->last_sum = cur_sum;
826 d->last_poll_data[d->last_idx]->last_count = cur_count;
827 d->last_idx = (d->last_idx + 1);
828 return 0;
829 }
831 /**
832 * Update latency counter or add new entry if it doesn't exist
833 */
834 static int update_last(struct ceph_daemon *d, const char *ds_n, int index,
835 double cur_sum, uint64_t cur_count)
836 {
837 if((d->last_idx > index) && (strcmp(d->last_poll_data[index]->ds_name, ds_n) == 0))
838 {
839 d->last_poll_data[index]->last_sum = cur_sum;
840 d->last_poll_data[index]->last_count = cur_count;
841 return 0;
842 }
844 if(!d->last_poll_data)
845 {
846 d->last_poll_data = malloc(1 * sizeof(struct last_data *));
847 if(!d->last_poll_data)
848 {
849 return -ENOMEM;
850 }
851 }
852 else
853 {
854 struct last_data **tmp_last = realloc(d->last_poll_data,
855 ((d->last_idx+1) * sizeof(struct last_data *)));
856 if(!tmp_last)
857 {
858 return -ENOMEM;
859 }
860 d->last_poll_data = tmp_last;
861 }
862 return add_last(d, ds_n, cur_sum, cur_count);
863 }
865 /**
866 * If using index guess failed (shouldn't happen, but possible if counters
867 * get rearranged), resort to searching for counter name
868 */
869 static int backup_search_for_last_avg(struct ceph_daemon *d, const char *ds_n)
870 {
871 int i = 0;
872 for(; i < d->last_idx; i++)
873 {
874 if(strcmp(d->last_poll_data[i]->ds_name, ds_n) == 0)
875 {
876 return i;
877 }
878 }
879 return -1;
880 }
/**
 * Calculate average b/t current data and last poll data
 * if last poll data exists
 *
 * Returns the per-interval average (sum delta / count delta), NAN when no
 * usable previous sample exists, or -ENOMEM (as a double) if storing the
 * new sample failed.
 */
static double get_last_avg(struct ceph_daemon *d, const char *ds_n, int index,
        double cur_sum, uint64_t cur_count)
{
    /* -1.1 is an in-band "no delta computed" sentinel, replaced by NAN
     * below. NOTE(review): the caller compares the returned double against
     * -ENOMEM with ==, and this function compares against -1.1 with == —
     * both are fragile float comparisons; confirm before altering any of
     * these sentinel values. */
    double result = -1.1, sum_delt = 0.0;
    uint64_t count_delt = 0;
    int tmp_index = 0;
    if(d->last_idx > index)
    {
        /* Try the guessed slot, then the previous slot, then fall back to
         * a linear search by counter name. */
        if(strcmp(d->last_poll_data[index]->ds_name, ds_n) == 0)
        {
            tmp_index = index;
        }
        //test previous index
        else if((index > 0) && (strcmp(d->last_poll_data[index-1]->ds_name, ds_n) == 0))
        {
            tmp_index = (index - 1);
        }
        else
        {
            tmp_index = backup_search_for_last_avg(d, ds_n);
        }

        /* Only compute a delta if the counter advanced; a reset daemon
         * (cur_count <= last_count) yields NAN instead of a bogus value. */
        if((tmp_index > -1) && (cur_count > d->last_poll_data[tmp_index]->last_count))
        {
            sum_delt = (cur_sum - d->last_poll_data[tmp_index]->last_sum);
            count_delt = (cur_count - d->last_poll_data[tmp_index]->last_count);
            result = (sum_delt / count_delt);
        }
    }

    if(result == -1.1)
    {
        result = NAN;
    }
    /* Record the current sample for the next poll regardless of whether a
     * delta was produced. NOTE(review): tmp_index may be -1 here (search
     * miss); update_last then compares slot -1 only via last_idx > index,
     * which is false for -1, so it falls through to append — verify. */
    if(update_last(d, ds_n, tmp_index, cur_sum, cur_count) == -ENOMEM)
    {
        return -ENOMEM;
    }
    return result;
}
927 /**
928 * If using index guess failed, resort to searching for counter name
929 */
930 static uint32_t backup_search_for_type(struct ceph_daemon *d, char *ds_name)
931 {
932 int idx = 0;
933 for(; idx < d->ds_num; idx++)
934 {
935 if(strcmp(d->ds_names[idx], ds_name) == 0)
936 {
937 return d->ds_types[idx];
938 }
939 }
940 return DSET_TYPE_UNFOUND;
941 }
/**
 * Process counter data and dispatch values.
 *
 * Data-pass handler invoked once per numeric JSON leaf. Latency counters
 * arrive as avgcount/sum pairs: the first call of a pair stashes avgcount
 * in vtmp and returns; the second computes the average and dispatches it.
 * All other types dispatch immediately.
 *
 * @param arg  struct values_tmp built by cconn_process_data
 * @param val  counter value as text
 * @param key  dotted counter key
 * @return 0 on success, 1 on unparsable key, negative on error.
 */
static int node_handler_fetch_data(void *arg, const char *val, const char *key)
{
    value_t uv;
    double tmp_d;
    uint64_t tmp_u;
    struct values_tmp *vtmp = (struct values_tmp*) arg;
    uint32_t type = DSET_TYPE_UNFOUND;
    int index = vtmp->index;

    char ds_name[DATA_MAX_NAME_LEN];
    memset(ds_name, 0, sizeof(ds_name));

    if(parse_keys(key, ds_name))
    {
        return 1;
    }

    if(index >= vtmp->d->ds_num)
    {
        //don't overflow bounds of array
        index = (vtmp->d->ds_num - 1);
    }

    /**
     * counters should remain in same order we parsed schema... we maintain the
     * index variable to keep track of current point in list of counters. first
     * use index to guess point in array for retrieving type. if that doesn't
     * work, use the old way to get the counter type
     */
    if(strcmp(ds_name, vtmp->d->ds_names[index]) == 0)
    {
        //found match
        type = vtmp->d->ds_types[index];
    }
    else if((index > 0) && (strcmp(ds_name, vtmp->d->ds_names[index-1]) == 0))
    {
        //try previous key
        type = vtmp->d->ds_types[index-1];
    }

    if(type == DSET_TYPE_UNFOUND)
    {
        //couldn't find right type by guessing, check the old way
        type = backup_search_for_type(vtmp->d, ds_name);
    }

    switch(type)
    {
        case DSET_LATENCY:
            /* avgcount_exists == -1 means no pending avgcount: this call is
             * the first half of the pair. */
            if(vtmp->avgcount_exists == -1)
            {
                sscanf(val, "%" PRIu64, &vtmp->avgcount);
                vtmp->avgcount_exists = 0;
                //return after saving avgcount - don't dispatch value
                //until latency calculation
                return 0;
            }
            else
            {
                double sum, result;
                sscanf(val, "%lf", &sum);

                /* avoid dividing by zero below */
                if(vtmp->avgcount == 0)
                {
                    vtmp->avgcount = 1;
                }

                /** User wants latency values as long run avg */
                if(long_run_latency_avg)
                {
                    result = (sum / vtmp->avgcount);
                }
                else
                {
                    result = get_last_avg(vtmp->d, ds_name, vtmp->latency_index, sum, vtmp->avgcount);
                    /* NOTE(review): -ENOMEM is compared against a double
                     * here — fragile but matches get_last_avg's contract. */
                    if(result == -ENOMEM)
                    {
                        return -ENOMEM;
                    }
                }

                uv.gauge = result;
                vtmp->avgcount_exists = -1;  /* pair consumed */
                vtmp->latency_index = (vtmp->latency_index + 1);
            }
            break;
        case DSET_BYTES:
            sscanf(val, "%lf", &tmp_d);
            uv.gauge = tmp_d;
            break;
        case DSET_RATE:
            sscanf(val, "%" PRIu64, &tmp_u);
            uv.derive = tmp_u;
            break;
        case DSET_TYPE_UNFOUND:
        default:
            ERROR("ceph plugin: ds %s was not properly initialized.", ds_name);
            return -1;
    }

    /* host/plugin/plugin_instance were pre-filled in cconn_process_data;
     * only type and type_instance vary per counter. */
    sstrncpy(vtmp->vlist.type, ceph_dset_types[type], sizeof(vtmp->vlist.type));
    sstrncpy(vtmp->vlist.type_instance, ds_name, sizeof(vtmp->vlist.type_instance));
    vtmp->vlist.values = &uv;
    vtmp->vlist.values_len = 1;

    vtmp->index = (vtmp->index + 1);
    plugin_dispatch_values(&vtmp->vlist);

    return 0;
}
1057 static int cconn_connect(struct cconn *io)
1058 {
1059 struct sockaddr_un address;
1060 int flags, fd, err;
1061 if(io->state != CSTATE_UNCONNECTED)
1062 {
1063 ERROR("ceph plugin: cconn_connect: io->state != CSTATE_UNCONNECTED");
1064 return -EDOM;
1065 }
1066 fd = socket(PF_UNIX, SOCK_STREAM, 0);
1067 if(fd < 0)
1068 {
1069 int err = -errno;
1070 ERROR("ceph plugin: cconn_connect: socket(PF_UNIX, SOCK_STREAM, 0) "
1071 "failed: error %d", err);
1072 return err;
1073 }
1074 memset(&address, 0, sizeof(struct sockaddr_un));
1075 address.sun_family = AF_UNIX;
1076 snprintf(address.sun_path, sizeof(address.sun_path), "%s",
1077 io->d->asok_path);
1078 RETRY_ON_EINTR(err,
1079 connect(fd, (struct sockaddr *) &address, sizeof(struct sockaddr_un)));
1080 if(err < 0)
1081 {
1082 ERROR("ceph plugin: cconn_connect: connect(%d) failed: error %d",
1083 fd, err);
1084 return err;
1085 }
1087 flags = fcntl(fd, F_GETFL, 0);
1088 if(fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0)
1089 {
1090 err = -errno;
1091 ERROR("ceph plugin: cconn_connect: fcntl(%d, O_NONBLOCK) error %d",
1092 fd, err);
1093 return err;
1094 }
1095 io->asok = fd;
1096 io->state = CSTATE_WRITE_REQUEST;
1097 io->amt = 0;
1098 io->json_len = 0;
1099 io->json = NULL;
1100 return 0;
1101 }
1103 static void cconn_close(struct cconn *io)
1104 {
1105 io->state = CSTATE_UNCONNECTED;
1106 if(io->asok != -1)
1107 {
1108 int res;
1109 RETRY_ON_EINTR(res, close(io->asok));
1110 }
1111 io->asok = -1;
1112 io->amt = 0;
1113 io->json_len = 0;
1114 sfree(io->json);
1115 io->json = NULL;
1116 }
1118 /* Process incoming JSON counter data */
1119 static int
1120 cconn_process_data(struct cconn *io, yajl_struct *yajl, yajl_handle hand)
1121 {
1122 int ret;
1123 struct values_tmp *vtmp = calloc(1, sizeof(struct values_tmp) * 1);
1124 if(!vtmp)
1125 {
1126 return -ENOMEM;
1127 }
1129 vtmp->vlist = (value_list_t)VALUE_LIST_INIT;
1130 sstrncpy(vtmp->vlist.host, hostname_g, sizeof(vtmp->vlist.host));
1131 sstrncpy(vtmp->vlist.plugin, "ceph", sizeof(vtmp->vlist.plugin));
1132 sstrncpy(vtmp->vlist.plugin_instance, io->d->name, sizeof(vtmp->vlist.plugin_instance));
1134 vtmp->d = io->d;
1135 vtmp->avgcount_exists = -1;
1136 vtmp->latency_index = 0;
1137 vtmp->index = 0;
1138 yajl->handler_arg = vtmp;
1139 ret = traverse_json(io->json, io->json_len, hand);
1140 sfree(vtmp);
1141 return ret;
1142 }
/**
 * Initiate JSON parsing and print error if one occurs.
 *
 * Allocates a yajl handle over the shared callback table, routes the parse
 * to the data or schema handler depending on the pending request type, and
 * finalizes the parse. Only ASOK_REQ_DATA and ASOK_REQ_SCHEMA responses
 * carry JSON; anything else is a caller bug (-EDOM).
 */
static int cconn_process_json(struct cconn *io)
{
    if((io->request_type != ASOK_REQ_DATA) &&
            (io->request_type != ASOK_REQ_SCHEMA))
    {
        return -EDOM;
    }

    int result = 1;
    yajl_handle hand;
    yajl_status status;

    /* yajl v1 and v2 differ in the alloc-function arguments. */
    hand = yajl_alloc(&callbacks,
#if HAVE_YAJL_V2
      /* alloc funcs = */ NULL,
#else
      /* alloc funcs = */ NULL, NULL,
#endif
      /* context = */ (void *)(&io->yajl));

    if(!hand)
    {
        ERROR ("ceph plugin: yajl_alloc failed.");
        return ENOMEM;
    }

    io->yajl.depth = 0;

    switch(io->request_type)
    {
        case ASOK_REQ_DATA:
            io->yajl.handler = node_handler_fetch_data;
            result = cconn_process_data(io, &io->yajl, hand);
            break;
        case ASOK_REQ_SCHEMA:
            //init daemon specific variables
            io->d->ds_num = 0;
            io->d->last_idx = 0;
            io->d->last_poll_data = NULL;
            io->yajl.handler = node_handler_define_schema;
            io->yajl.handler_arg = io->d;
            result = traverse_json(io->json, io->json_len, hand);
            break;
    }

    /* Skip the completion step if parsing already failed; traverse_json
     * logged the details. */
    if(result)
    {
        goto done;
    }

#if HAVE_YAJL_V2
    status = yajl_complete_parse(hand);
#else
    status = yajl_parse_complete(hand);
#endif

    if (status != yajl_status_ok)
    {
        unsigned char *errmsg = yajl_get_error (hand, /* verbose = */ 0,
                /* jsonText = */ NULL, /* jsonTextLen = */ 0);
        ERROR ("ceph plugin: yajl_parse_complete failed: %s",
                (char *) errmsg);
        yajl_free_error (hand, errmsg);
        yajl_free (hand);
        return 1;
    }

done:
    yajl_free (hand);
    return result;
}
1219 static int cconn_validate_revents(struct cconn *io, int revents)
1220 {
1221 if(revents & POLLERR)
1222 {
1223 ERROR("ceph plugin: cconn_validate_revents(name=%s): got POLLERR",
1224 io->d->name);
1225 return -EIO;
1226 }
1227 switch (io->state)
1228 {
1229 case CSTATE_WRITE_REQUEST:
1230 return (revents & POLLOUT) ? 0 : -EINVAL;
1231 case CSTATE_READ_VERSION:
1232 case CSTATE_READ_AMT:
1233 case CSTATE_READ_JSON:
1234 return (revents & POLLIN) ? 0 : -EINVAL;
1235 default:
1236 ERROR("ceph plugin: cconn_validate_revents(name=%s) got to "
1237 "illegal state on line %d", io->d->name, __LINE__);
1238 return -EDOM;
1239 }
1240 }
/** Handle a network event for a connection */
static int cconn_handle_event(struct cconn *io)
{
    int ret;
    switch (io->state)
    {
        case CSTATE_UNCONNECTED:
            /* poll() should never report events for a socket we have not
             * connected yet; reaching here is a state-machine bug. */
            ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
                "state on line %d", io->d->name, __LINE__);
            return -EDOM;
        case CSTATE_WRITE_REQUEST:
        {
            char cmd[32];
            /* Build the admin-socket request; request_type is rendered as a
             * number inside the JSON "prefix" field, terminated by '\n'. */
            snprintf(cmd, sizeof(cmd), "%s%d%s", "{ \"prefix\": \"",
                io->request_type, "\" }\n");
            size_t cmd_len = strlen(cmd);
            /* io->amt counts bytes already sent by earlier partial writes,
             * so resume the write from that offset. */
            RETRY_ON_EINTR(ret,
                write(io->asok, ((char*)&cmd) + io->amt, cmd_len - io->amt));
            DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,amt=%d,ret=%d)",
                io->d->name, io->state, io->amt, ret);
            if(ret < 0)
            {
                return ret;
            }
            io->amt += ret;
            if(io->amt >= cmd_len)
            {
                /* Request fully sent: reset the byte offset and switch to
                 * the read state matching the request type. */
                io->amt = 0;
                switch (io->request_type)
                {
                    case ASOK_REQ_VERSION:
                        io->state = CSTATE_READ_VERSION;
                        break;
                    default:
                        io->state = CSTATE_READ_AMT;
                        break;
                }
            }
            return 0;
        }
        case CSTATE_READ_VERSION:
        {
            /* Read the protocol version (network byte order), possibly
             * across several partial reads tracked by io->amt. */
            RETRY_ON_EINTR(ret,
                read(io->asok, ((char*)(&io->d->version)) + io->amt,
                    sizeof(io->d->version) - io->amt));
            DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
                io->d->name, io->state, ret);
            if(ret < 0)
            {
                return ret;
            }
            io->amt += ret;
            if(io->amt >= sizeof(io->d->version))
            {
                io->d->version = ntohl(io->d->version);
                /* Only protocol version 1 is supported. */
                if(io->d->version != 1)
                {
                    ERROR("ceph plugin: cconn_handle_event(name=%s) not "
                        "expecting version %d!", io->d->name, io->d->version);
                    return -ENOTSUP;
                }
                DEBUG("ceph plugin: cconn_handle_event(name=%s): identified as "
                    "version %d", io->d->name, io->d->version);
                io->amt = 0;
                /* Handshake done: close this connection and queue the schema
                 * request; the main loop will reconnect for it. */
                cconn_close(io);
                io->request_type = ASOK_REQ_SCHEMA;
            }
            return 0;
        }
        case CSTATE_READ_AMT:
        {
            /* Read the length (network byte order) of the JSON payload
             * that follows. */
            RETRY_ON_EINTR(ret,
                read(io->asok, ((char*)(&io->json_len)) + io->amt,
                    sizeof(io->json_len) - io->amt));
            DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
                io->d->name, io->state, ret);
            if(ret < 0)
            {
                return ret;
            }
            io->amt += ret;
            if(io->amt >= sizeof(io->json_len))
            {
                io->json_len = ntohl(io->json_len);
                io->amt = 0;
                io->state = CSTATE_READ_JSON;
                /* +1 so the buffer is always NUL-terminated after the
                 * payload is read in. */
                io->json = calloc(1, io->json_len + 1);
                if(!io->json)
                {
                    ERROR("ceph plugin: error callocing io->json");
                    return -ENOMEM;
                }
            }
            return 0;
        }
        case CSTATE_READ_JSON:
        {
            /* Accumulate the JSON payload, io->amt bytes at a time. */
            RETRY_ON_EINTR(ret,
                read(io->asok, io->json + io->amt, io->json_len - io->amt));
            DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
                io->d->name, io->state, ret);
            if(ret < 0)
            {
                return ret;
            }
            io->amt += ret;
            if(io->amt >= io->json_len)
            {
                /* Full payload received: parse it, then mark this request
                 * as serviced. */
                ret = cconn_process_json(io);
                if(ret)
                {
                    return ret;
                }
                cconn_close(io);
                io->request_type = ASOK_REQ_NONE;
            }
            return 0;
        }
        default:
            ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
                "state on line %d", io->d->name, __LINE__);
            return -EDOM;
    }
}
1368 static int cconn_prepare(struct cconn *io, struct pollfd* fds)
1369 {
1370 int ret;
1371 if(io->request_type == ASOK_REQ_NONE)
1372 {
1373 /* The request has already been serviced. */
1374 return 0;
1375 }
1376 else if((io->request_type == ASOK_REQ_DATA) && (io->d->ds_num == 0))
1377 {
1378 /* If there are no counters to report on, don't bother
1379 * connecting */
1380 return 0;
1381 }
1383 switch (io->state)
1384 {
1385 case CSTATE_UNCONNECTED:
1386 ret = cconn_connect(io);
1387 if(ret > 0)
1388 {
1389 return -ret;
1390 }
1391 else if(ret < 0)
1392 {
1393 return ret;
1394 }
1395 fds->fd = io->asok;
1396 fds->events = POLLOUT;
1397 return 1;
1398 case CSTATE_WRITE_REQUEST:
1399 fds->fd = io->asok;
1400 fds->events = POLLOUT;
1401 return 1;
1402 case CSTATE_READ_VERSION:
1403 case CSTATE_READ_AMT:
1404 case CSTATE_READ_JSON:
1405 fds->fd = io->asok;
1406 fds->events = POLLIN;
1407 return 1;
1408 default:
1409 ERROR("ceph plugin: cconn_prepare(name=%s) got to illegal state "
1410 "on line %d", io->d->name, __LINE__);
1411 return -EDOM;
1412 }
1413 }
/** Returns the difference between two struct timevals in milliseconds.
 * On overflow, we return max/min int.
 */
static int milli_diff(const struct timeval *t1, const struct timeval *t2)
{
    int64_t ret;
    /* Do all the arithmetic in 64 bits: the previous int-typed
     * sec_diff * 1000 could overflow (undefined behavior) before the
     * clamp below ever ran. */
    int64_t sec_diff = (int64_t)t1->tv_sec - (int64_t)t2->tv_sec;
    int64_t usec_diff = (int64_t)t1->tv_usec - (int64_t)t2->tv_usec;
    ret = usec_diff / 1000;
    ret += sec_diff * 1000;
    /* Clamp to the int range promised by the contract. */
    return (ret > INT_MAX) ? INT_MAX : ((ret < INT_MIN) ? INT_MIN : (int)ret);
}
/** This handles the actual network I/O to talk to the Ceph daemons.
 */
static int cconn_main_loop(uint32_t request_type)
{
    int i, ret, some_unreachable = 0;
    struct timeval end_tv;
    /* One connection per configured daemon (runtime-sized VLA). */
    struct cconn io_array[g_num_daemons];

    DEBUG("ceph plugin: entering cconn_main_loop(request_type = %d)", request_type);

    /* create cconn array */
    memset(io_array, 0, sizeof(io_array));
    for(i = 0; i < g_num_daemons; ++i)
    {
        io_array[i].d = g_daemons[i];
        io_array[i].request_type = request_type;
        io_array[i].state = CSTATE_UNCONNECTED;
    }

    /** Calculate the time at which we should give up */
    gettimeofday(&end_tv, NULL);
    end_tv.tv_sec += CEPH_TIMEOUT_INTERVAL;

    while (1)
    {
        int nfds, diff;
        struct timeval tv;
        /* polled_io_array[k] is the cconn that owns fds[k]. */
        struct cconn *polled_io_array[g_num_daemons];
        struct pollfd fds[g_num_daemons];
        memset(fds, 0, sizeof(fds));
        nfds = 0;
        /* Build the pollfd set.  A connection that fails to prepare is
         * closed and marked unreachable instead of aborting the loop. */
        for(i = 0; i < g_num_daemons; ++i)
        {
            struct cconn *io = io_array + i;
            ret = cconn_prepare(io, fds + nfds);
            if(ret < 0)
            {
                WARNING("ceph plugin: cconn_prepare(name=%s,i=%d,st=%d)=%d",
                    io->d->name, i, io->state, ret);
                cconn_close(io);
                io->request_type = ASOK_REQ_NONE;
                some_unreachable = 1;
            }
            else if(ret == 1)
            {
                polled_io_array[nfds++] = io_array + i;
            }
        }
        if(nfds == 0)
        {
            /* finished */
            ret = 0;
            goto done;
        }
        /* Poll only for the time remaining until the deadline. */
        gettimeofday(&tv, NULL);
        diff = milli_diff(&end_tv, &tv);
        if(diff <= 0)
        {
            /* Timed out */
            ret = -ETIMEDOUT;
            WARNING("ceph plugin: cconn_main_loop: timed out.");
            goto done;
        }
        RETRY_ON_EINTR(ret, poll(fds, nfds, diff));
        if(ret < 0)
        {
            ERROR("ceph plugin: poll(2) error: %d", ret);
            goto done;
        }
        /* Service every descriptor that poll() reported ready; failures
         * disable that single connection, not the whole pass. */
        for(i = 0; i < nfds; ++i)
        {
            struct cconn *io = polled_io_array[i];
            int revents = fds[i].revents;
            if(revents == 0)
            {
                /* do nothing */
            }
            else if(cconn_validate_revents(io, revents))
            {
                WARNING("ceph plugin: cconn(name=%s,i=%d,st=%d): "
                    "revents validation error: "
                    "revents=0x%08x", io->d->name, i, io->state, revents);
                cconn_close(io);
                io->request_type = ASOK_REQ_NONE;
                some_unreachable = 1;
            }
            else
            {
                int ret = cconn_handle_event(io);
                if(ret)
                {
                    WARNING("ceph plugin: cconn_handle_event(name=%s,"
                        "i=%d,st=%d): error %d", io->d->name, i, io->state, ret);
                    cconn_close(io);
                    io->request_type = ASOK_REQ_NONE;
                    some_unreachable = 1;
                }
            }
        }
    }
    /* Common exit: close every connection before returning. */
    done: for(i = 0; i < g_num_daemons; ++i)
    {
        cconn_close(io_array + i);
    }
    if(some_unreachable)
    {
        DEBUG("ceph plugin: cconn_main_loop: some Ceph daemons were unreachable.");
    }
    else
    {
        DEBUG("ceph plugin: cconn_main_loop: reached all Ceph daemons :)");
    }
    return ret;
}
1543 static int ceph_read(void)
1544 {
1545 return cconn_main_loop(ASOK_REQ_DATA);
1546 }
1548 /******* lifecycle *******/
1549 static int ceph_init(void)
1550 {
1551 int ret;
1552 ceph_daemons_print();
1554 ret = cconn_main_loop(ASOK_REQ_VERSION);
1556 return (ret) ? ret : 0;
1557 }
1559 static int ceph_shutdown(void)
1560 {
1561 int i;
1562 for(i = 0; i < g_num_daemons; ++i)
1563 {
1564 ceph_daemon_free(g_daemons[i]);
1565 }
1566 sfree(g_daemons);
1567 g_daemons = NULL;
1568 g_num_daemons = 0;
1569 DEBUG("ceph plugin: finished ceph_shutdown");
1570 return 0;
1571 }
1573 void module_register(void)
1574 {
1575 plugin_register_complex_config("ceph", ceph_config);
1576 plugin_register_init("ceph", ceph_init);
1577 plugin_register_read("ceph", ceph_read);
1578 plugin_register_shutdown("ceph", ceph_shutdown);
1579 }