e8bde9b4967936dfbfaeb0b3cd28b1ff15f8a7fe
1 /**
2 * collectd - src/ceph.c
3 * Copyright (C) 2011 New Dream Network
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; only version 2 of the License is applicable.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 *
18 * Authors:
19 * Colin McCabe <cmccabe@alumni.cmu.edu>
20 * Dennis Zou <yunzou@cisco.com>
21 * Dan Ryder <daryder@cisco.com>
22 **/
24 #define _BSD_SOURCE
26 #include "collectd.h"
27 #include "common.h"
28 #include "plugin.h"
30 #include <arpa/inet.h>
31 #include <errno.h>
32 #include <fcntl.h>
33 #include <yajl/yajl_parse.h>
34 #if HAVE_YAJL_YAJL_VERSION_H
35 #include <yajl/yajl_version.h>
36 #endif
38 #include <limits.h>
39 #include <poll.h>
40 #include <stdint.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <strings.h>
45 #include <sys/socket.h>
46 #include <sys/time.h>
47 #include <sys/types.h>
48 #include <sys/un.h>
49 #include <unistd.h>
50 #include <math.h>
51 #include <inttypes.h>
/** Sentinel a node handler returns when an "avgcount"/"sum" short key should
 *  be retried with the full key appended (see ceph_cb_number). */
#define RETRY_AVGCOUNT -1

#if defined(YAJL_MAJOR) && (YAJL_MAJOR > 1)
# define HAVE_YAJL_V2 1
#endif

/** Evaluate expr, retrying while it fails with EINTR; on any other failure
 *  ret is left holding -errno, on success it holds the non-negative result. */
#define RETRY_ON_EINTR(ret, expr) \
    while(1) { \
        ret = expr; \
        if(ret >= 0) \
            break; \
        ret = -errno; \
        if(ret != -EINTR) \
            break; \
    }

/** Timeout interval in seconds */
#define CEPH_TIMEOUT_INTERVAL 1

/** Maximum path length for a UNIX domain socket on this system */
#define UNIX_DOMAIN_SOCK_PATH_MAX (sizeof(((struct sockaddr_un*)0)->sun_path))

/** Yajl callback returns */
#define CEPH_CB_CONTINUE 1
#define CEPH_CB_ABORT 0

/** yajl v2 changed the string/number length type from unsigned int to size_t */
#if HAVE_YAJL_V2
typedef size_t yajl_len_t;
#else
typedef unsigned int yajl_len_t;
#endif

/** Number of types for ceph defined in types.db */
#define CEPH_DSET_TYPES_NUM 3
/** ceph types enum */
enum ceph_dset_type_d
{
    DSET_LATENCY = 0,
    DSET_BYTES = 1,
    DSET_RATE = 2,
    /** marker for a counter whose type could not be resolved */
    DSET_TYPE_UNFOUND = 1000
};

/** Valid types for ceph defined in types.db */
const char * ceph_dset_types [CEPH_DSET_TYPES_NUM] =
    {"ceph_latency", "ceph_bytes", "ceph_rate"};
/******* ceph_daemon *******/
/** One configured Ceph daemon plus the counter schema learned from it */
struct ceph_daemon
{
    /** Version of the admin_socket interface */
    uint32_t version;
    /** daemon name **/
    char name[DATA_MAX_NAME_LEN];

    /** Path to the socket that we use to talk to the ceph daemon */
    char asok_path[UNIX_DOMAIN_SOCK_PATH_MAX];

    /** Number of counters */
    int ds_num;
    /** Track ds types */
    uint32_t *ds_types;
    /** Track ds names to match with types */
    char **ds_names;
};

/******* JSON parsing *******/
/** Per-leaf handler: (handler_arg, value-as-string, dotted key) */
typedef int (*node_handler_t)(void *, const char*, const char*);

/** Track state and handler while parsing JSON */
struct yajl_struct
{
    node_handler_t handler;
    void * handler_arg;
    /** stack of map keys from the root down to the current node; key_len is
     *  the stored key's length including its terminating NUL */
    struct {
        char key[DATA_MAX_NAME_LEN];
        int key_len;
    } state[YAJL_MAX_DEPTH];
    int depth;
};
typedef struct yajl_struct yajl_struct;
/**
 * Keep track of last data for latency values so we can calculate rate
 * since last poll.
 */
struct last_data **last_poll_data = NULL;
/** index of last poll data */
int last_idx = 0;

/** Bits found in the schema's per-counter "type" field */
enum perfcounter_type_d
{
    PERFCOUNTER_LATENCY = 0x4, PERFCOUNTER_DERIVE = 0x8,
};

/** Give user option to use default (long run = since daemon started) avg */
static int long_run_latency_avg = 0;

/**
 * Give user option to use default type for special cases -
 * filestore.journal_wr_bytes is currently only metric here. Ceph reports the
 * type as a sum/count pair and will calculate it the same as a latency value.
 * All other "bytes" metrics (excluding the used/capacity bytes for the OSD)
 * use the DERIVE type. Unless user specifies to use given type, convert this
 * metric to use DERIVE.
 */
static int convert_special_metrics = 1;

/** Array of daemons to monitor */
static struct ceph_daemon **g_daemons = NULL;

/** Number of elements in g_daemons */
static int g_num_daemons = 0;
/**
 * A set of data that we build up in memory while parsing the JSON.
 */
struct values_tmp
{
    /** ceph daemon we are processing data for*/
    struct ceph_daemon *d;
    /** track avgcount across counters for avgcount/sum latency pairs */
    uint64_t avgcount;
    /** current index of counters - used to get type of counter */
    int index;
    /** do we already have an avgcount for latency pair (-1 means no) */
    int avgcount_exists;
    /**
     * similar to index, but current index of latency type counters -
     * used to get last poll data of counter
     */
    int latency_index;
    /**
     * values list - maintain across counters since
     * host/plugin/plugin instance are always the same
     */
    value_list_t vlist;
};

/**
 * A set of count/sum pairs to keep track of latency types and get difference
 * between this poll data and last poll data.
 */
struct last_data
{
    char ds_name[DATA_MAX_NAME_LEN];
    double last_sum;
    uint64_t last_count;
};
/******* network I/O *******/
/** State machine for one admin-socket conversation */
enum cstate_t
{
    CSTATE_UNCONNECTED = 0,
    CSTATE_WRITE_REQUEST,
    CSTATE_READ_VERSION,
    CSTATE_READ_AMT,
    CSTATE_READ_JSON,
};

/** Command codes written to the admin socket */
enum request_type_t
{
    ASOK_REQ_VERSION = 0,
    ASOK_REQ_DATA = 1,
    ASOK_REQ_SCHEMA = 2,
    /** no request outstanding */
    ASOK_REQ_NONE = 1000,
};

/** One connection to a Ceph daemon's admin socket */
struct cconn
{
    /** The Ceph daemon that we're talking to */
    struct ceph_daemon *d;

    /** Request type */
    uint32_t request_type;

    /** The connection state */
    enum cstate_t state;

    /** The socket we use to talk to this daemon */
    int asok;

    /** The amount of data remaining to read / write. */
    uint32_t amt;

    /** Length of the JSON to read */
    uint32_t json_len;

    /** Buffer containing JSON data */
    unsigned char *json;

    /** Keep data important to yajl processing */
    struct yajl_struct yajl;
};
/** yajl callback: JSON nulls carry no metric data; keep parsing. */
static int ceph_cb_null(void *ctx)
{
    return CEPH_CB_CONTINUE;
}
/** yajl callback: booleans are ignored; keep parsing. */
static int ceph_cb_boolean(void *ctx, int bool_val)
{
    return CEPH_CB_CONTINUE;
}
/**
 * yajl callback for every numeric leaf.  Rebuilds the dotted key from the
 * key stack, hands (value-as-string, key) to the current node handler, then
 * pops one level of depth.  Numbers stay strings so the handler can choose
 * integer or double parsing.
 */
static int
ceph_cb_number(void *ctx, const char *number_val, yajl_len_t number_len)
{
    yajl_struct *yajl = (yajl_struct*)ctx;
    char buffer[number_len+1];
    int i, latency_type = 0, result;
    /* NOTE(review): fixed 128-byte scratch key; the strncat calls below are
     * not bounded against it, so unusually deep/long schema keys could
     * overflow - TODO confirm keys stay well under 128 chars. */
    char key[128];

    memcpy(buffer, number_val, number_len);
    buffer[sizeof(buffer) - 1] = 0;

    /* key_len includes the terminating NUL (set in ceph_cb_map_key), so this
     * copies exactly the stored root key */
    ssnprintf(key, yajl->state[0].key_len, "%s", yajl->state[0].key);
    for(i = 1; i < yajl->depth; i++)
    {
        /* an innermost "avgcount"/"sum" key marks a latency-style pair:
         * stop before appending it so the handler first sees the short key */
        if((i == yajl->depth-1) && ((strcmp(yajl->state[i].key,"avgcount") == 0)
                || (strcmp(yajl->state[i].key,"sum") == 0)))
        {
            if(convert_special_metrics)
            {
                /**
                 * Special case for filestore:JournalWrBytes. For some reason,
                 * Ceph schema encodes this as a count/sum pair while all
                 * other "Bytes" data (excluding used/capacity bytes for OSD
                 * space) uses a single "Derive" type. To spare further
                 * confusion, keep this KPI as the same type of other "Bytes".
                 * Instead of keeping an "average" or "rate", use the "sum" in
                 * the pair and assign that to the derive value.
                 */
                if((strcmp(yajl->state[i-1].key, "journal_wr_bytes") == 0) &&
                        (strcmp(yajl->state[i-2].key,"filestore") == 0) &&
                        (strcmp(yajl->state[i].key,"avgcount") == 0))
                {
                    DEBUG("ceph plugin: Skipping avgcount for filestore.JournalWrBytes");
                    yajl->depth = (yajl->depth - 1);
                    return CEPH_CB_CONTINUE;
                }
            }
            //probably a avgcount/sum pair. if not - we'll try full key later
            latency_type = 1;
            break;
        }
        strncat(key, ".", 1);
        strncat(key, yajl->state[i].key, yajl->state[i].key_len+1);
    }

    result = yajl->handler(yajl->handler_arg, buffer, key);

    /* handler did not recognize the short key: append the trailing
     * avgcount/sum component and retry with the full key */
    if((result == RETRY_AVGCOUNT) && latency_type)
    {
        strncat(key, ".", 1);
        strncat(key, yajl->state[yajl->depth-1].key,
                yajl->state[yajl->depth-1].key_len+1);
        result = yajl->handler(yajl->handler_arg, buffer, key);
    }

    if(result == -ENOMEM)
    {
        ERROR("ceph plugin: memory allocation failed");
        return CEPH_CB_ABORT;
    }

    yajl->depth = (yajl->depth - 1);
    return CEPH_CB_CONTINUE;
}
/** yajl callback: string values are ignored; keep parsing. */
static int ceph_cb_string(void *ctx, const unsigned char *string_val,
        yajl_len_t string_len)
{
    return CEPH_CB_CONTINUE;
}
/** yajl callback: map entry is handled in ceph_cb_map_key; keep parsing. */
static int ceph_cb_start_map(void *ctx)
{
    return CEPH_CB_CONTINUE;
}
335 static int
336 ceph_cb_map_key(void *ctx, const unsigned char *key, yajl_len_t string_len)
337 {
338 yajl_struct *yajl = (yajl_struct*)ctx;
340 if((yajl->depth+1) >= YAJL_MAX_DEPTH)
341 {
342 ERROR("ceph plugin: depth exceeds max, aborting.");
343 return CEPH_CB_ABORT;
344 }
346 char buffer[string_len+1];
348 memcpy(buffer, key, string_len);
349 buffer[sizeof(buffer) - 1] = 0;
351 snprintf(yajl->state[yajl->depth].key, sizeof(buffer), "%s", buffer);
352 yajl->state[yajl->depth].key_len = sizeof(buffer);
353 yajl->depth = (yajl->depth + 1);
355 return CEPH_CB_CONTINUE;
356 }
/** yajl callback: leaving a map pops one level off the key stack. */
static int ceph_cb_end_map(void *ctx)
{
    yajl_struct *yajl = (yajl_struct*)ctx;

    yajl->depth = (yajl->depth - 1);
    return CEPH_CB_CONTINUE;
}
/** yajl callback: arrays are not expected in the data; keep parsing. */
static int ceph_cb_start_array(void *ctx)
{
    return CEPH_CB_CONTINUE;
}
/** yajl callback: arrays are not expected in the data; keep parsing. */
static int ceph_cb_end_array(void *ctx)
{
    return CEPH_CB_CONTINUE;
}
/* yajl callback table.  The integer and double slots are NULL so every
 * number is delivered to ceph_cb_number as its raw string. */
static yajl_callbacks callbacks = {
    ceph_cb_null,
    ceph_cb_boolean,
    NULL,
    NULL,
    ceph_cb_number,
    ceph_cb_string,
    ceph_cb_start_map,
    ceph_cb_map_key,
    ceph_cb_end_map,
    ceph_cb_start_array,
    ceph_cb_end_array
};
/** Log one daemon's identifying configuration at DEBUG level. */
static void ceph_daemon_print(const struct ceph_daemon *d)
{
    DEBUG("ceph plugin: name=%s, asok_path=%s", d->name, d->asok_path);
}
395 static void ceph_daemons_print(void)
396 {
397 int i;
398 for(i = 0; i < g_num_daemons; ++i)
399 {
400 ceph_daemon_print(g_daemons[i]);
401 }
402 }
404 static void ceph_daemon_free(struct ceph_daemon *d)
405 {
406 int i = 0;
407 for(; i < d->ds_num; i++)
408 {
409 sfree(d->ds_names[i]);
410 }
411 sfree(d->ds_types);
412 sfree(d->ds_names);
413 sfree(d);
414 }
416 /**
417 * Compact ds name by removing special characters and trimming length to
418 * DATA_MAX_NAME_LEN if necessary
419 */
420 static void compact_ds_name(char *source, char *dest)
421 {
422 int keys_num = 0, i;
423 char *save_ptr = NULL, *tmp_ptr = source;
424 char *keys[16];
425 char len_str[3];
426 char tmp[DATA_MAX_NAME_LEN];
427 size_t key_chars_remaining = (DATA_MAX_NAME_LEN-1);
428 int reserved = 0;
429 int offset = 0;
430 memset(tmp, 0, sizeof(tmp));
431 if(source == NULL || dest == NULL || source[0] == '\0' || dest[0] != '\0')
432 {
433 return;
434 }
435 size_t src_len = strlen(source);
436 snprintf(len_str, sizeof(len_str), "%zu", src_len);
437 unsigned char append_status = 0x0;
438 append_status |= (source[src_len - 1] == '-') ? 0x1 : 0x0;
439 append_status |= (source[src_len - 1] == '+') ? 0x2 : 0x0;
440 while ((keys[keys_num] = strtok_r(tmp_ptr, ":_-+", &save_ptr)) != NULL)
441 {
442 tmp_ptr = NULL;
443 /** capitalize 1st char **/
444 keys[keys_num][0] = toupper(keys[keys_num][0]);
445 keys_num++;
446 if(keys_num >= 16)
447 {
448 break;
449 }
450 }
451 /** concatenate each part of source string **/
452 for(i = 0; i < keys_num; i++)
453 {
454 strncat(tmp, keys[i], key_chars_remaining);
455 key_chars_remaining -= strlen(keys[i]);
456 }
457 tmp[DATA_MAX_NAME_LEN - 1] = '\0';
458 /** to coordinate limitation of length of type_instance
459 * we will truncate ds_name
460 * when the its length is more than
461 * DATA_MAX_NAME_LEN
462 */
463 if(strlen(tmp) > DATA_MAX_NAME_LEN - 1)
464 {
465 append_status |= 0x4;
466 /** we should reserve space for
467 * len_str
468 */
469 reserved += 2;
470 }
471 if(append_status & 0x1)
472 {
473 /** we should reserve space for
474 * "Minus"
475 */
476 reserved += 5;
477 }
478 if(append_status & 0x2)
479 {
480 /** we should reserve space for
481 * "Plus"
482 */
483 reserved += 4;
484 }
485 snprintf(dest, DATA_MAX_NAME_LEN - reserved, "%s", tmp);
486 offset = strlen(dest);
487 switch (append_status)
488 {
489 case 0x1:
490 memcpy(dest + offset, "Minus", 5);
491 break;
492 case 0x2:
493 memcpy(dest + offset, "Plus", 5);
494 break;
495 case 0x4:
496 memcpy(dest + offset, len_str, 2);
497 break;
498 case 0x5:
499 memcpy(dest + offset, "Minus", 5);
500 memcpy(dest + offset + 5, len_str, 2);
501 break;
502 case 0x6:
503 memcpy(dest + offset, "Plus", 4);
504 memcpy(dest + offset + 4, len_str, 2);
505 break;
506 default:
507 break;
508 }
509 }
/**
 * Parse key to remove "type" if this is for schema and initiate compaction.
 *
 * Copies key_str (minus a trailing ".type" component when the key has at
 * least two dots) into a bounded scratch buffer and hands it to
 * compact_ds_name, which writes the final name into ds_name.
 * Returns 0 on success, -1 on bad arguments.
 */
static int parse_keys(const char *key_str, char *ds_name)
{
    char *ptr, *rptr;
    size_t ds_name_len = 0;
    /**
     * allow up to 100 characters before compaction - compact_ds_name will not
     * allow more than DATA_MAX_NAME_LEN chars
     */
    int max_str_len = 100;
    char tmp_ds_name[max_str_len];
    memset(tmp_ds_name, 0, sizeof(tmp_ds_name));
    if(ds_name == NULL || key_str == NULL || key_str[0] == '\0' ||
            ds_name[0] != '\0')
    {
        return -1;
    }
    if((ptr = strchr(key_str, '.')) == NULL
            || (rptr = strrchr(key_str, '.')) == NULL)
    {
        /* snprintf stops at key_str's NUL; the previous memcpy of
         * max_str_len - 1 bytes read past the end of short keys */
        snprintf(tmp_ds_name, sizeof(tmp_ds_name), "%s", key_str);
        goto compact;
    }
    ds_name_len = (rptr - ptr) > max_str_len ? max_str_len : (rptr - ptr);
    if((ds_name_len == 0) || strncmp(rptr + 1, "type", 4))
    { /** copy whole key **/
        snprintf(tmp_ds_name, sizeof(tmp_ds_name), "%s", key_str);
    }
    else
    {/** more than two keys: drop the trailing ".type" component **/
        memcpy(tmp_ds_name, key_str, ((rptr - key_str) > (max_str_len - 1) ?
                (max_str_len - 1) : (rptr - key_str)));
    }

compact: compact_ds_name(tmp_ds_name, ds_name);
    return 0;
}
551 /**
552 * while parsing ceph admin socket schema, save counter name and type for later
553 * data processing
554 */
555 static int ceph_daemon_add_ds_entry(struct ceph_daemon *d, const char *name,
556 int pc_type)
557 {
558 uint32_t type;
559 char ds_name[DATA_MAX_NAME_LEN];
560 memset(ds_name, 0, sizeof(ds_name));
562 if(convert_special_metrics)
563 {
564 /**
565 * Special case for filestore:JournalWrBytes. For some reason, Ceph
566 * schema encodes this as a count/sum pair while all other "Bytes" data
567 * (excluding used/capacity bytes for OSD space) uses a single "Derive"
568 * type. To spare further confusion, keep this KPI as the same type of
569 * other "Bytes". Instead of keeping an "average" or "rate", use the
570 * "sum" in the pair and assign that to the derive value.
571 */
572 if((strcmp(name,"filestore.journal_wr_bytes.type") == 0))
573 {
574 pc_type = 10;
575 }
576 }
578 d->ds_names = realloc(d->ds_names, sizeof(char *) * (d->ds_num + 1));
579 if(!d->ds_names)
580 {
581 return -ENOMEM;
582 }
584 d->ds_types = realloc(d->ds_types, sizeof(uint32_t) * (d->ds_num + 1));
585 if(!d->ds_types)
586 {
587 return -ENOMEM;
588 }
590 d->ds_names[d->ds_num] = malloc(sizeof(char) * DATA_MAX_NAME_LEN);
591 if(!d->ds_names[d->ds_num])
592 {
593 return -ENOMEM;
594 }
596 type = (pc_type & PERFCOUNTER_DERIVE) ? DSET_RATE :
597 ((pc_type & PERFCOUNTER_LATENCY) ? DSET_LATENCY : DSET_BYTES);
598 d->ds_types[d->ds_num] = type;
600 if(parse_keys(name, ds_name))
601 {
602 return 1;
603 }
605 sstrncpy(d->ds_names[d->ds_num], ds_name, DATA_MAX_NAME_LEN -1);
606 d->ds_num = (d->ds_num + 1);
608 return 0;
609 }
611 /******* ceph_config *******/
612 static int cc_handle_str(struct oconfig_item_s *item, char *dest, int dest_len)
613 {
614 const char *val;
615 if(item->values_num != 1)
616 {
617 return -ENOTSUP;
618 }
619 if(item->values[0].type != OCONFIG_TYPE_STRING)
620 {
621 return -ENOTSUP;
622 }
623 val = item->values[0].value.string;
624 if(snprintf(dest, dest_len, "%s", val) > (dest_len - 1))
625 {
626 ERROR("ceph plugin: configuration parameter '%s' is too long.\n",
627 item->key);
628 return -ENAMETOOLONG;
629 }
630 return 0;
631 }
633 static int cc_handle_bool(struct oconfig_item_s *item, int *dest)
634 {
635 if(item->values_num != 1)
636 {
637 return -ENOTSUP;
638 }
640 if(item->values[0].type != OCONFIG_TYPE_BOOLEAN)
641 {
642 return -ENOTSUP;
643 }
645 *dest = (item->values[0].value.boolean) ? 1 : 0;
646 return 0;
647 }
/**
 * Parse one <Daemon> config block: validate the name and admin-socket path,
 * then append a new ceph_daemon to g_daemons.
 * Returns 0 on success, a negative value on configuration errors (the caller
 * skips this daemon) and positive ENOMEM on allocation failure.
 */
static int cc_add_daemon_config(oconfig_item_t *ci)
{
    int ret, i;
    struct ceph_daemon *array, *nd, cd;
    memset(&cd, 0, sizeof(struct ceph_daemon));

    if((ci->values_num != 1) || (ci->values[0].type != OCONFIG_TYPE_STRING))
    {
        WARNING("ceph plugin: `Daemon' blocks need exactly one string "
                "argument.");
        return (-1);
    }

    /* the block's argument is the daemon name */
    ret = cc_handle_str(ci, cd.name, DATA_MAX_NAME_LEN);
    if(ret)
    {
        return ret;
    }

    for(i=0; i < ci->children_num; i++)
    {
        oconfig_item_t *child = ci->children + i;

        if(strcasecmp("SocketPath", child->key) == 0)
        {
            ret = cc_handle_str(child, cd.asok_path, sizeof(cd.asok_path));
            if(ret)
            {
                return ret;
            }
        }
        else
        {
            WARNING("ceph plugin: ignoring unknown option %s", child->key);
        }
    }
    if(cd.name[0] == '\0')
    {
        ERROR("ceph plugin: you must configure a daemon name.\n");
        return -EINVAL;
    }
    else if(cd.asok_path[0] == '\0')
    {
        ERROR("ceph plugin(name=%s): you must configure an administrative "
                "socket path.\n", cd.name);
        return -EINVAL;
    }
    else if(!((cd.asok_path[0] == '/') ||
            (cd.asok_path[0] == '.' && cd.asok_path[1] == '/')))
    {
        ERROR("ceph plugin(name=%s): administrative socket paths must begin "
                "with '/' or './' Can't parse: '%s'\n", cd.name, cd.asok_path);
        return -EINVAL;
    }
    /* grow the global daemon array, then copy the fully-validated stack
     * record into a freshly allocated entry */
    array = realloc(g_daemons,
            sizeof(struct ceph_daemon *) * (g_num_daemons + 1));
    if(array == NULL)
    {
        /* The positive return value here indicates that this is a
         * runtime error, not a configuration error. */
        return ENOMEM;
    }
    g_daemons = (struct ceph_daemon**) array;
    nd = malloc(sizeof(struct ceph_daemon));
    if(!nd)
    {
        return ENOMEM;
    }
    memcpy(nd, &cd, sizeof(struct ceph_daemon));
    g_daemons[g_num_daemons++] = nd;
    return 0;
}
722 static int ceph_config(oconfig_item_t *ci)
723 {
724 int ret, i;
726 for(i = 0; i < ci->children_num; ++i)
727 {
728 oconfig_item_t *child = ci->children + i;
729 if(strcasecmp("Daemon", child->key) == 0)
730 {
731 ret = cc_add_daemon_config(child);
732 if(ret == ENOMEM)
733 {
734 ERROR("ceph plugin: Couldn't allocate memory");
735 return ret;
736 }
737 else if(ret)
738 {
739 //process other daemons and ignore this one
740 continue;
741 }
742 }
743 else if(strcasecmp("LongRunAvgLatency", child->key) == 0)
744 {
745 ret = cc_handle_bool(child, &long_run_latency_avg);
746 if(ret)
747 {
748 return ret;
749 }
750 }
751 else if(strcasecmp("ConvertSpecialMetricTypes", child->key) == 0)
752 {
753 ret = cc_handle_bool(child, &convert_special_metrics);
754 if(ret)
755 {
756 return ret;
757 }
758 }
759 else
760 {
761 WARNING("ceph plugin: ignoring unknown option %s", child->key);
762 }
763 }
764 return 0;
765 }
767 /**
768 * Parse JSON and get error message if present
769 */
770 static int
771 traverse_json(const unsigned char *json, uint32_t json_len, yajl_handle hand)
772 {
773 yajl_status status = yajl_parse(hand, json, json_len);
774 unsigned char *msg;
776 switch(status)
777 {
778 case yajl_status_error:
779 msg = yajl_get_error(hand, /* verbose = */ 1,
780 /* jsonText = */ (unsigned char *) json,
781 (unsigned int) json_len);
782 ERROR ("ceph plugin: yajl_parse failed: %s", msg);
783 yajl_free_error(hand, msg);
784 return 1;
785 case yajl_status_client_canceled:
786 return 1;
787 default:
788 return 0;
789 }
790 }
792 /**
793 * Add entry for each counter while parsing schema
794 */
795 static int
796 node_handler_define_schema(void *arg, const char *val, const char *key)
797 {
798 struct ceph_daemon *d = (struct ceph_daemon *) arg;
799 int pc_type;
800 pc_type = atoi(val);
801 DEBUG("ceph plugin: ceph_daemon_add_ds_entry(d=%s,key=%s,pc_type=%04x)",
802 d->name, key, pc_type);
803 return ceph_daemon_add_ds_entry(d, key, pc_type);
804 }
806 /**
807 * Latency counter does not yet have an entry in last poll data - add it.
808 */
809 static int add_last(const char *ds_n, double cur_sum, uint64_t cur_count)
810 {
811 last_poll_data[last_idx] = malloc(1 * sizeof(struct last_data));
812 if(!last_poll_data[last_idx])
813 {
814 return -ENOMEM;
815 }
816 sstrncpy(last_poll_data[last_idx]->ds_name,ds_n,
817 sizeof(last_poll_data[last_idx]->ds_name));
818 last_poll_data[last_idx]->last_sum = cur_sum;
819 last_poll_data[last_idx]->last_count = cur_count;
820 last_idx++;
821 return 0;
822 }
824 /**
825 * Update latency counter or add new entry if it doesn't exist
826 */
827 static int update_last(const char *ds_n, int index, double cur_sum,
828 uint64_t cur_count)
829 {
830 if((last_idx > index) && (strcmp(last_poll_data[index]->ds_name, ds_n) == 0))
831 {
832 last_poll_data[index]->last_sum = cur_sum;
833 last_poll_data[index]->last_count = cur_count;
834 return 0;
835 }
837 if(!last_poll_data)
838 {
839 last_poll_data = malloc(1 * sizeof(struct last_data *));
840 if(!last_poll_data)
841 {
842 return -ENOMEM;
843 }
844 }
845 else
846 {
847 struct last_data **tmp_last = realloc(last_poll_data,
848 ((last_idx+1) * sizeof(struct last_data *)));
849 if(!tmp_last)
850 {
851 return -ENOMEM;
852 }
853 last_poll_data = tmp_last;
854 }
855 return add_last(ds_n, cur_sum, cur_count);
856 }
858 /**
859 * Calculate average b/t current data and last poll data
860 * if last poll data exists
861 */
862 static double get_last_avg(const char *ds_n, int index,
863 double cur_sum, uint64_t cur_count)
864 {
865 double result = -1.1, sum_delt = 0.0;
866 uint64_t count_delt = 0;
867 if((last_idx > index) &&
868 (strcmp(last_poll_data[index]->ds_name, ds_n) == 0) &&
869 (cur_count > last_poll_data[index]->last_count))
870 {
871 sum_delt = (cur_sum - last_poll_data[index]->last_sum);
872 count_delt = (cur_count - last_poll_data[index]->last_count);
873 result = (sum_delt / count_delt);
874 }
876 if(result == -1.1)
877 {
878 result = NAN;
879 }
880 if(update_last(ds_n, index, cur_sum, cur_count) == -ENOMEM)
881 {
882 return -ENOMEM;
883 }
884 return result;
885 }
887 /**
888 * If using index guess failed, resort to searching for counter name
889 */
890 static uint32_t backup_search_for_type(struct ceph_daemon *d, char *ds_name)
891 {
892 int idx = 0;
893 for(; idx < d->ds_num; idx++)
894 {
895 if(strcmp(d->ds_names[idx], ds_name) == 0)
896 {
897 return d->ds_types[idx];
898 }
899 }
900 return DSET_TYPE_UNFOUND;
901 }
/**
 * Process counter data and dispatch values.
 *
 * Called once per numeric leaf of the data JSON: resolves the counter's
 * type (index guess first, name search as fallback), converts the value,
 * and dispatches it.  Latency counters arrive as avgcount/sum pairs and are
 * dispatched only on the second (sum) call.
 * Returns 0 on success/deferred dispatch, 1 on unparsable key, -ENOMEM on
 * allocation failure, -1 on unknown counter.
 */
static int node_handler_fetch_data(void *arg, const char *val, const char *key)
{
    value_t uv;
    double tmp_d;
    uint64_t tmp_u;
    struct values_tmp *vtmp = (struct values_tmp*) arg;
    uint32_t type = DSET_TYPE_UNFOUND;
    int index = vtmp->index;

    char ds_name[DATA_MAX_NAME_LEN];
    memset(ds_name, 0, sizeof(ds_name));

    if(parse_keys(key, ds_name))
    {
        return 1;
    }

    if(index >= vtmp->d->ds_num)
    {
        //don't overflow bounds of array
        index = (vtmp->d->ds_num - 1);
    }

    /**
     * counters should remain in same order we parsed schema... we maintain the
     * index variable to keep track of current point in list of counters. first
     * use index to guess point in array for retrieving type. if that doesn't
     * work, use the old way to get the counter type
     */
    if(strcmp(ds_name, vtmp->d->ds_names[index]) == 0)
    {
        //found match
        type = vtmp->d->ds_types[index];
    }
    else if((index > 0) && (strcmp(ds_name, vtmp->d->ds_names[index-1]) == 0))
    {
        //try previous key
        type = vtmp->d->ds_types[index-1];
    }

    if(type == DSET_TYPE_UNFOUND)
    {
        //couldn't find right type by guessing, check the old way
        type = backup_search_for_type(vtmp->d, ds_name);
    }

    switch(type)
    {
        case DSET_LATENCY:
            /* first call of the pair delivers avgcount; stash it and wait
             * for the sum before dispatching anything */
            if(vtmp->avgcount_exists == -1)
            {
                sscanf(val, "%" PRIu64, &vtmp->avgcount);
                vtmp->avgcount_exists = 0;
                //return after saving avgcount - don't dispatch value
                //until latency calculation
                return 0;
            }
            else
            {
                double sum, result;
                sscanf(val, "%lf", &sum);
                DEBUG("ceph plugin: avgcount:%" PRIu64,vtmp->avgcount);
                DEBUG("ceph plugin: sum:%lf",sum);

                /* guard against division by zero below */
                if(vtmp->avgcount == 0)
                {
                    vtmp->avgcount = 1;
                }

                /** User wants latency values as long run avg */
                if(long_run_latency_avg)
                {
                    result = (sum / vtmp->avgcount);
                    DEBUG("ceph plugin: uv->gauge = sumd / avgcounti = :%lf", result);
                }
                else
                {
                    result = get_last_avg(ds_name, vtmp->latency_index, sum, vtmp->avgcount);
                    if(result == -ENOMEM)
                    {
                        return -ENOMEM;
                    }
                    DEBUG("ceph plugin: uv->gauge = (sumd_now - sumd_last) / "
                            "(avgcounti_now - avgcounti_last) = :%lf", result);
                }

                uv.gauge = result;
                vtmp->avgcount_exists = -1;
                vtmp->latency_index = (vtmp->latency_index + 1);
            }
            break;
        case DSET_BYTES:
            sscanf(val, "%lf", &tmp_d);
            uv.gauge = tmp_d;
            DEBUG("ceph plugin: uv->gauge = %lf",uv.gauge);
            break;
        case DSET_RATE:
            sscanf(val, "%" PRIu64, &tmp_u);
            uv.derive = tmp_u;
            DEBUG("ceph plugin: uv->derive = %" PRIu64 "",(uint64_t)uv.derive);
            break;
        case DSET_TYPE_UNFOUND:
        default:
            ERROR("ceph plugin: ds %s was not properly initialized.", ds_name);
            return -1;
    }

    /* host/plugin/plugin_instance were filled once in cconn_process_data;
     * only type and type_instance vary per counter */
    sstrncpy(vtmp->vlist.type, ceph_dset_types[type], sizeof(vtmp->vlist.type));
    sstrncpy(vtmp->vlist.type_instance, ds_name, sizeof(vtmp->vlist.type_instance));
    vtmp->vlist.values = &uv;
    vtmp->vlist.values_len = 1;

    DEBUG("ceph plugin: dispatching %s\n", ds_name);
    vtmp->index = (vtmp->index + 1);
    plugin_dispatch_values(&vtmp->vlist);

    return 0;
}
1025 static int cconn_connect(struct cconn *io)
1026 {
1027 struct sockaddr_un address;
1028 int flags, fd, err;
1029 if(io->state != CSTATE_UNCONNECTED)
1030 {
1031 ERROR("ceph plugin: cconn_connect: io->state != CSTATE_UNCONNECTED");
1032 return -EDOM;
1033 }
1034 fd = socket(PF_UNIX, SOCK_STREAM, 0);
1035 if(fd < 0)
1036 {
1037 int err = -errno;
1038 ERROR("ceph plugin: cconn_connect: socket(PF_UNIX, SOCK_STREAM, 0) "
1039 "failed: error %d", err);
1040 return err;
1041 }
1042 memset(&address, 0, sizeof(struct sockaddr_un));
1043 address.sun_family = AF_UNIX;
1044 snprintf(address.sun_path, sizeof(address.sun_path), "%s",
1045 io->d->asok_path);
1046 RETRY_ON_EINTR(err,
1047 connect(fd, (struct sockaddr *) &address, sizeof(struct sockaddr_un)));
1048 if(err < 0)
1049 {
1050 ERROR("ceph plugin: cconn_connect: connect(%d) failed: error %d",
1051 fd, err);
1052 return err;
1053 }
1055 flags = fcntl(fd, F_GETFL, 0);
1056 if(fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0)
1057 {
1058 err = -errno;
1059 ERROR("ceph plugin: cconn_connect: fcntl(%d, O_NONBLOCK) error %d",
1060 fd, err);
1061 return err;
1062 }
1063 io->asok = fd;
1064 io->state = CSTATE_WRITE_REQUEST;
1065 io->amt = 0;
1066 io->json_len = 0;
1067 io->json = NULL;
1068 return 0;
1069 }
1071 static void cconn_close(struct cconn *io)
1072 {
1073 io->state = CSTATE_UNCONNECTED;
1074 if(io->asok != -1)
1075 {
1076 int res;
1077 RETRY_ON_EINTR(res, close(io->asok));
1078 }
1079 io->asok = -1;
1080 io->amt = 0;
1081 io->json_len = 0;
1082 sfree(io->json);
1083 io->json = NULL;
1084 }
1086 /* Process incoming JSON counter data */
1087 static int
1088 cconn_process_data(struct cconn *io, yajl_struct *yajl, yajl_handle hand)
1089 {
1090 int ret;
1091 struct values_tmp *vtmp = calloc(1, sizeof(struct values_tmp) * 1);
1092 if(!vtmp)
1093 {
1094 return -ENOMEM;
1095 }
1097 vtmp->vlist = (value_list_t)VALUE_LIST_INIT;
1098 sstrncpy(vtmp->vlist.host, hostname_g, sizeof(vtmp->vlist.host));
1099 sstrncpy(vtmp->vlist.plugin, "ceph", sizeof(vtmp->vlist.plugin));
1100 sstrncpy(vtmp->vlist.plugin_instance, io->d->name, sizeof(vtmp->vlist.plugin_instance));
1102 vtmp->d = io->d;
1103 vtmp->avgcount_exists = -1;
1104 vtmp->latency_index = 0;
1105 vtmp->index = 0;
1106 yajl->handler_arg = vtmp;
1107 ret = traverse_json(io->json, io->json_len, hand);
1108 sfree(vtmp);
1109 return ret;
1110 }
/**
 * Initiate JSON parsing and print error if one occurs.
 *
 * Sets up a yajl handle with the right per-node handler for the request
 * type (data vs schema), walks the buffered JSON, and finalizes the parse.
 * Returns 0 on success, non-zero on failure (-EDOM for an unexpected
 * request type, ENOMEM when yajl_alloc fails).
 */
static int cconn_process_json(struct cconn *io)
{
    /* only data and schema replies carry JSON we know how to handle */
    if((io->request_type != ASOK_REQ_DATA) &&
            (io->request_type != ASOK_REQ_SCHEMA))
    {
        return -EDOM;
    }

    int result = 1;
    yajl_handle hand;
    yajl_status status;

    hand = yajl_alloc(&callbacks,
#if HAVE_YAJL_V2
        /* alloc funcs = */ NULL,
#else
        /* alloc funcs = */ NULL, NULL,
#endif
        /* context = */ (void *)(&io->yajl));

    if(!hand)
    {
        ERROR ("ceph plugin: yajl_alloc failed.");
        return ENOMEM;
    }

    io->yajl.depth = 0;

    /* pick the per-leaf handler for this request type, then walk the JSON */
    switch(io->request_type)
    {
        case ASOK_REQ_DATA:
            io->yajl.handler = node_handler_fetch_data;
            result = cconn_process_data(io, &io->yajl, hand);
            break;
        case ASOK_REQ_SCHEMA:
            io->yajl.handler = node_handler_define_schema;
            io->yajl.handler_arg = io->d;
            result = traverse_json(io->json, io->json_len, hand);
            break;
    }

    if(result)
    {
        goto done;
    }

#if HAVE_YAJL_V2
    status = yajl_complete_parse(hand);
#else
    status = yajl_parse_complete(hand);
#endif

    if (status != yajl_status_ok)
    {
        unsigned char *errmsg = yajl_get_error (hand, /* verbose = */ 0,
            /* jsonText = */ NULL, /* jsonTextLen = */ 0);
        ERROR ("ceph plugin: yajl_parse_complete failed: %s",
            (char *) errmsg);
        yajl_free_error (hand, errmsg);
        yajl_free (hand);
        return 1;
    }

done:
    yajl_free (hand);
    return result;
}
1183 static int cconn_validate_revents(struct cconn *io, int revents)
1184 {
1185 if(revents & POLLERR)
1186 {
1187 ERROR("ceph plugin: cconn_validate_revents(name=%s): got POLLERR",
1188 io->d->name);
1189 return -EIO;
1190 }
1191 switch (io->state)
1192 {
1193 case CSTATE_WRITE_REQUEST:
1194 return (revents & POLLOUT) ? 0 : -EINVAL;
1195 case CSTATE_READ_VERSION:
1196 case CSTATE_READ_AMT:
1197 case CSTATE_READ_JSON:
1198 return (revents & POLLIN) ? 0 : -EINVAL;
1199 return (revents & POLLIN) ? 0 : -EINVAL;
1200 default:
1201 ERROR("ceph plugin: cconn_validate_revents(name=%s) got to "
1202 "illegal state on line %d", io->d->name, __LINE__);
1203 return -EDOM;
1204 }
1205 }
/** Handle a network event for a connection.
 *
 * Drives the connection's state machine after poll(2) reported the
 * socket ready. The protocol phases are: write the JSON command, read
 * the 4-byte version word (ASOK_REQ_VERSION only), read the 4-byte
 * network-order length prefix, then read that many bytes of JSON.
 * io->amt counts bytes transferred in the CURRENT phase, so short
 * reads/writes simply return 0 and resume on the next poll cycle.
 *
 * Returns 0 on success (possibly with more I/O pending) or a negative
 * errno value on failure.
 */
static int cconn_handle_event(struct cconn *io)
{
    int ret;
    switch (io->state)
    {
    case CSTATE_UNCONNECTED:
        /* poll() must never hand us events for an unconnected socket. */
        ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
            "state on line %d", io->d->name, __LINE__);

        return -EDOM;
    case CSTATE_WRITE_REQUEST:
    {
        char cmd[32];
        /* The numeric request_type doubles as the admin-socket command
         * prefix, e.g. "{ \"prefix\": \"2\" }\n". */
        snprintf(cmd, sizeof(cmd), "%s%d%s", "{ \"prefix\": \"",
                io->request_type, "\" }\n");
        size_t cmd_len = strlen(cmd);
        /* Resume a partially written command at offset io->amt. */
        RETRY_ON_EINTR(ret,
              write(io->asok, ((char*)&cmd) + io->amt, cmd_len - io->amt));
        DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,amt=%d,ret=%d)",
                io->d->name, io->state, io->amt, ret);
        if(ret < 0)
        {
            return ret;
        }
        io->amt += ret;
        if(io->amt >= cmd_len)
        {
            /* Command fully sent; reset the byte counter and move to the
             * matching read state. */
            io->amt = 0;
            switch (io->request_type)
            {
                case ASOK_REQ_VERSION:
                    io->state = CSTATE_READ_VERSION;
                    break;
                default:
                    io->state = CSTATE_READ_AMT;
                    break;
            }
        }
        return 0;
    }
    case CSTATE_READ_VERSION:
    {
        /* Read the protocol version word, resuming partial reads. */
        RETRY_ON_EINTR(ret,
                read(io->asok, ((char*)(&io->d->version)) + io->amt,
                        sizeof(io->d->version) - io->amt));
        DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
                io->d->name, io->state, ret);
        if(ret < 0)
        {
            return ret;
        }
        io->amt += ret;
        if(io->amt >= sizeof(io->d->version))
        {
            /* The version arrives in network byte order; only version 1
             * is supported. */
            io->d->version = ntohl(io->d->version);
            if(io->d->version != 1)
            {
                ERROR("ceph plugin: cconn_handle_event(name=%s) not "
                    "expecting version %d!", io->d->name, io->d->version);
                return -ENOTSUP;
            }
            DEBUG("ceph plugin: cconn_handle_event(name=%s): identified as "
                    "version %d", io->d->name, io->d->version);
            io->amt = 0;
            /* The admin socket is one-shot: close it and queue the next
             * request (schema) for a fresh connection. */
            cconn_close(io);
            io->request_type = ASOK_REQ_SCHEMA;
        }
        return 0;
    }
    case CSTATE_READ_AMT:
    {
        /* Read the 4-byte network-order length prefix of the JSON body. */
        RETRY_ON_EINTR(ret,
                read(io->asok, ((char*)(&io->json_len)) + io->amt,
                        sizeof(io->json_len) - io->amt));
        DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
                io->d->name, io->state, ret);
        if(ret < 0)
        {
            return ret;
        }
        io->amt += ret;
        if(io->amt >= sizeof(io->json_len))
        {
            io->json_len = ntohl(io->json_len);
            io->amt = 0;
            io->state = CSTATE_READ_JSON;
            /* +1 so the buffer is always NUL-terminated for the parser. */
            io->json = calloc(1, io->json_len + 1);
            if(!io->json)
            {
                ERROR("ceph plugin: error callocing io->json");
                return -ENOMEM;
            }
        }
        return 0;
    }
    case CSTATE_READ_JSON:
    {
        /* Read the JSON payload itself, resuming partial reads. */
        RETRY_ON_EINTR(ret,
               read(io->asok, io->json + io->amt, io->json_len - io->amt));
        DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
                io->d->name, io->state, ret);
        if(ret < 0)
        {
            return ret;
        }
        io->amt += ret;
        if(io->amt >= io->json_len)
        {
            /* Whole document received: parse it, then close and mark the
             * request as serviced. */
            ret = cconn_process_json(io);
            if(ret)
            {
                return ret;
            }
            cconn_close(io);
            io->request_type = ASOK_REQ_NONE;
        }
        return 0;
    }
    default:
        ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
            "state on line %d", io->d->name, __LINE__);
        return -EDOM;
    }
}
1333 static int cconn_prepare(struct cconn *io, struct pollfd* fds)
1334 {
1335 int ret;
1336 if(io->request_type == ASOK_REQ_NONE)
1337 {
1338 /* The request has already been serviced. */
1339 return 0;
1340 }
1341 else if((io->request_type == ASOK_REQ_DATA) && (io->d->ds_num == 0))
1342 {
1343 /* If there are no counters to report on, don't bother
1344 * connecting */
1345 return 0;
1346 }
1348 switch (io->state)
1349 {
1350 case CSTATE_UNCONNECTED:
1351 ret = cconn_connect(io);
1352 if(ret > 0)
1353 {
1354 return -ret;
1355 }
1356 else if(ret < 0)
1357 {
1358 return ret;
1359 }
1360 fds->fd = io->asok;
1361 fds->events = POLLOUT;
1362 return 1;
1363 case CSTATE_WRITE_REQUEST:
1364 fds->fd = io->asok;
1365 fds->events = POLLOUT;
1366 return 1;
1367 case CSTATE_READ_VERSION:
1368 case CSTATE_READ_AMT:
1369 case CSTATE_READ_JSON:
1370 fds->fd = io->asok;
1371 fds->events = POLLIN;
1372 return 1;
1373 default:
1374 ERROR("ceph plugin: cconn_prepare(name=%s) got to illegal state "
1375 "on line %d", io->d->name, __LINE__);
1376 return -EDOM;
1377 }
1378 }
/** Returns the difference between two struct timevals (t1 - t2) in
 * milliseconds. The result is clamped to [INT_MIN, INT_MAX] on overflow.
 */
static int milli_diff(const struct timeval *t1, const struct timeval *t2)
{
    int64_t ret;
    /* Do ALL arithmetic in 64 bits: the old code computed
     * `int sec_diff * 1000` in plain int, which is signed overflow
     * (undefined behavior) for a seconds delta beyond ~2.1 million —
     * exactly the case the clamp below is meant to handle. tv_sec may
     * also be wider than int on some platforms. */
    int64_t sec_diff = (int64_t)t1->tv_sec - (int64_t)t2->tv_sec;
    int64_t usec_diff = (int64_t)t1->tv_usec - (int64_t)t2->tv_usec;
    ret = usec_diff / 1000;
    ret += sec_diff * 1000;
    return (ret > INT_MAX) ? INT_MAX : ((ret < INT_MIN) ? INT_MIN : (int)ret);
}
/** This handles the actual network I/O to talk to the Ceph daemons.
 *
 * Drives one request of the given type (version / schema / data)
 * against every configured daemon in parallel: each daemon gets a
 * cconn state machine, and the loop repeatedly builds a pollfd set via
 * cconn_prepare(), polls with the remaining time budget, and advances
 * ready connections via cconn_handle_event(). A daemon that errors out
 * is closed and marked serviced so the others can continue.
 *
 * Returns 0 once every connection is serviced, -ETIMEDOUT if the
 * CEPH_TIMEOUT_INTERVAL budget expires, or a negative errno from
 * poll(2). Per-daemon failures only set some_unreachable (logged), not
 * the return value.
 */
static int cconn_main_loop(uint32_t request_type)
{
    int i, ret, some_unreachable = 0;
    struct timeval end_tv;
    /* One connection per daemon; VLA sized by the global daemon count. */
    struct cconn io_array[g_num_daemons];

    DEBUG("ceph plugin: entering cconn_main_loop(request_type = %d)", request_type);

    /* create cconn array */
    memset(io_array, 0, sizeof(io_array));
    for(i = 0; i < g_num_daemons; ++i)
    {
        io_array[i].d = g_daemons[i];
        io_array[i].request_type = request_type;
        io_array[i].state = CSTATE_UNCONNECTED;
    }

    /** Calculate the time at which we should give up */
    gettimeofday(&end_tv, NULL);
    end_tv.tv_sec += CEPH_TIMEOUT_INTERVAL;

    while (1)
    {
        int nfds, diff;
        struct timeval tv;
        /* Maps each pollfd slot back to its connection. */
        struct cconn *polled_io_array[g_num_daemons];
        struct pollfd fds[g_num_daemons];
        memset(fds, 0, sizeof(fds));
        nfds = 0;
        for(i = 0; i < g_num_daemons; ++i)
        {
            struct cconn *io = io_array + i;
            ret = cconn_prepare(io, fds + nfds);
            if(ret < 0)
            {
                /* Give up on this daemon but keep servicing the rest. */
                WARNING("ceph plugin: cconn_prepare(name=%s,i=%d,st=%d)=%d",
                        io->d->name, i, io->state, ret);
                cconn_close(io);
                io->request_type = ASOK_REQ_NONE;
                some_unreachable = 1;
            }
            else if(ret == 1)
            {
                DEBUG("ceph plugin: did cconn_prepare(name=%s,i=%d,st=%d)",
                        io->d->name, i, io->state);
                polled_io_array[nfds++] = io_array + i;
            }
        }
        if(nfds == 0)
        {
            /* finished */
            ret = 0;
            DEBUG("ceph plugin: cconn_main_loop: no more cconn to manage.");
            goto done;
        }
        /* Use the time remaining until end_tv as the poll timeout. */
        gettimeofday(&tv, NULL);
        diff = milli_diff(&end_tv, &tv);
        if(diff <= 0)
        {
            /* Timed out */
            ret = -ETIMEDOUT;
            WARNING("ceph plugin: cconn_main_loop: timed out.");
            goto done;
        }
        RETRY_ON_EINTR(ret, poll(fds, nfds, diff));
        if(ret < 0)
        {
            ERROR("ceph plugin: poll(2) error: %d", ret);
            goto done;
        }
        for(i = 0; i < nfds; ++i)
        {
            struct cconn *io = polled_io_array[i];
            int revents = fds[i].revents;
            if(revents == 0)
            {
                /* do nothing */
            }
            else if(cconn_validate_revents(io, revents))
            {
                /* Unexpected events for this state: drop the daemon. */
                WARNING("ceph plugin: cconn(name=%s,i=%d,st=%d): "
                "revents validation error: "
                "revents=0x%08x", io->d->name, i, io->state, revents);
                cconn_close(io);
                io->request_type = ASOK_REQ_NONE;
                some_unreachable = 1;
            }
            else
            {
                int ret = cconn_handle_event(io);
                if(ret)
                {
                    /* I/O or protocol error: drop the daemon. */
                    WARNING("ceph plugin: cconn_handle_event(name=%s,"
                    "i=%d,st=%d): error %d", io->d->name, i, io->state, ret);
                    cconn_close(io);
                    io->request_type = ASOK_REQ_NONE;
                    some_unreachable = 1;
                }
            }
        }
    }
    /* Close every connection on all exit paths (goto-based cleanup). */
    done: for(i = 0; i < g_num_daemons; ++i)
    {
        cconn_close(io_array + i);
    }
    if(some_unreachable)
    {
        DEBUG("ceph plugin: cconn_main_loop: some Ceph daemons were unreachable.");
    }
    else
    {
        DEBUG("ceph plugin: cconn_main_loop: reached all Ceph daemons :)");
    }
    return ret;
}
1511 static int ceph_read(void)
1512 {
1513 return cconn_main_loop(ASOK_REQ_DATA);
1514 }
1516 /******* lifecycle *******/
1517 static int ceph_init(void)
1518 {
1519 int ret;
1520 DEBUG("ceph plugin: ceph_init");
1521 ceph_daemons_print();
1523 ret = cconn_main_loop(ASOK_REQ_VERSION);
1525 return (ret) ? ret : 0;
1526 }
1528 static int ceph_shutdown(void)
1529 {
1530 int i;
1531 for(i = 0; i < g_num_daemons; ++i)
1532 {
1533 ceph_daemon_free(g_daemons[i]);
1534 }
1535 sfree(g_daemons);
1536 g_daemons = NULL;
1537 g_num_daemons = 0;
1538 for(i = 0; i < last_idx; i++)
1539 {
1540 sfree(last_poll_data[i]);
1541 }
1542 sfree(last_poll_data);
1543 last_poll_data = NULL;
1544 last_idx = 0;
1545 DEBUG("ceph plugin: finished ceph_shutdown");
1546 return 0;
1547 }
/** collectd plugin entry point: register the config, init, read, and
 * shutdown callbacks under the "ceph" plugin name. */
void module_register(void)
{
    plugin_register_complex_config("ceph", ceph_config);
    plugin_register_init("ceph", ceph_init);
    plugin_register_read("ceph", ceph_read);
    plugin_register_shutdown("ceph", ceph_shutdown);
}