a719cd376d18a5cd6fb690117133977ddfff13fd
1 /**
2 * collectd - src/write_kafka.c
3 * Copyright (C) 2014 Pierre-Yves Ritschard
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Pierre-Yves Ritschard <pyr at spootnik.org>
25 */
#include "collectd.h"

#include "plugin.h"
#include "common.h"
#include "utils_cmd_putval.h"
#include "utils_format_graphite.h"
#include "utils_format_json.h"

#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <librdkafka/rdkafka.h>
/* Per-<Topic> state, shared between the config parser and the write
 * callback registered for that topic. */
struct kafka_topic_context {
#define KAFKA_FORMAT_JSON 0
#define KAFKA_FORMAT_COMMAND 1
#define KAFKA_FORMAT_GRAPHITE 2
    uint8_t format;              /* one of the KAFKA_FORMAT_* values above */
    unsigned int graphite_flags; /* GRAPHITE_* flags for format_graphite() */
    _Bool store_rates;           /* passed through to format_json_value_list() */
    rd_kafka_topic_conf_t *conf; /* topic configuration; NULLed once consumed
                                  * by kafka_handle() */
    rd_kafka_topic_t *topic;     /* topic handle, created lazily in kafka_handle() */
    rd_kafka_conf_t *kafka_conf; /* producer configuration; NULLed once consumed
                                  * by kafka_handle() */
    rd_kafka_t *kafka;           /* producer handle, created lazily */
    char *key;                   /* fixed partition key, or NULL for a random key
                                  * per message */
    char *prefix;                /* graphite metric prefix (Graphite format only) */
    char *postfix;               /* graphite metric postfix (Graphite format only) */
    char escape_char;            /* graphite escape character */
    char *topic_name;            /* name of the kafka topic to produce to */
    pthread_mutex_t lock;        /* serializes lazy handle creation in kafka_write() */
};
/* Forward declarations for the callbacks defined below. */
static int kafka_handle(struct kafka_topic_context *);
static int kafka_write(const data_set_t *, const value_list_t *, user_data_t *);
static int32_t kafka_partition(const rd_kafka_topic_t *, const void *, size_t,
                               int32_t, void *, void *);

/* Version 0.9.0 of librdkafka deprecates rd_kafka_set_logger() in favor of
 * rd_kafka_conf_set_log_cb(). This is to make sure we're not using the
 * deprecated function. */
#ifdef HAVE_LIBRDKAFKA_LOG_CB
# undef HAVE_LIBRDKAFKA_LOGGER
#endif
#if defined(HAVE_LIBRDKAFKA_LOGGER) || defined(HAVE_LIBRDKAFKA_LOG_CB)
static void kafka_log(const rd_kafka_t *, int, const char *, const char *);

/* Bridges librdkafka's log output into collectd's logging facility.
 * `rkt` and `fac` (facility) are part of the required callback signature
 * but unused here; `level` is forwarded unchanged to plugin_log(). */
static void kafka_log(const rd_kafka_t *rkt, int level,
                      const char *fac, const char *msg)
{
    plugin_log(level, "%s", msg);
}
#endif
/* djb2-style string hash (h = h * 33 + byte), folding the key bytes in
 * reverse order: the last byte of the key is mixed in first. */
static uint32_t kafka_hash(const char *keydata, size_t keylen)
{
    uint32_t h = 5381;
    const char *p = keydata + keylen;

    while (p != keydata)
        h = (h * 33u) + (uint32_t)(*--p);
    return h;
}
88 /* 31 bit -> 4 byte -> 8 byte hex string + null byte */
89 #define KAFKA_RANDOM_KEY_SIZE 9
90 #define KAFKA_RANDOM_KEY_BUFFER (char[KAFKA_RANDOM_KEY_SIZE]) {""}
91 static char *kafka_random_key(char buffer[static KAFKA_RANDOM_KEY_SIZE])
92 {
93 ssnprintf(buffer, KAFKA_RANDOM_KEY_SIZE, "%08lX", (unsigned long) mrand48());
94 return buffer;
95 }
97 static int32_t kafka_partition(const rd_kafka_topic_t *rkt,
98 const void *keydata, size_t keylen,
99 int32_t partition_cnt, void *p, void *m)
100 {
101 uint32_t key = kafka_hash(keydata, keylen);
102 uint32_t target = key % partition_cnt;
103 int32_t i = partition_cnt;
105 while (--i > 0 && !rd_kafka_topic_partition_available(rkt, target)) {
106 target = (target + 1) % partition_cnt;
107 }
108 return target;
109 }
/* Lazily creates the librdkafka producer handle and topic handle for `ctx`.
 * Called from kafka_write() with ctx->lock held.  The handles are built
 * from duplicates of the stored configurations; on success the originals
 * in ctx are destroyed and NULLed so they are not reused.
 * Returns 0 on success, non-zero on failure. */
static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
{
    char errbuf[1024];
    rd_kafka_conf_t *conf;
    rd_kafka_topic_conf_t *topic_conf;

    /* Fast path: both handles already exist. */
    if (ctx->kafka != NULL && ctx->topic != NULL)
        return(0);

    if (ctx->kafka == NULL) {
        /* Duplicate so ctx->kafka_conf stays usable for a retry if
         * creation fails below. */
        if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka config");
            return(1);
        }

        /* NOTE(review): on failure `conf` is not destroyed here and may
         * leak -- whether librdkafka frees the conf object on error is
         * version-dependent; confirm against the librdkafka version in
         * use before changing this. */
        if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                       errbuf, sizeof(errbuf))) == NULL) {
            ERROR("write_kafka plugin: cannot create kafka handle.");
            return 1;
        }

        /* The duplicate was handed to librdkafka; drop the original. */
        rd_kafka_conf_destroy(ctx->kafka_conf);
        ctx->kafka_conf = NULL;

        INFO ("write_kafka plugin: created KAFKA handle : %s", rd_kafka_name(ctx->kafka));

#if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB)
        rd_kafka_set_logger(ctx->kafka, kafka_log);
#endif
    }

    if (ctx->topic == NULL ) {
        if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
            ERROR("write_kafka plugin: cannot duplicate kafka topic config");
            return 1;
        }

        /* NOTE(review): `topic_conf` may likewise leak if topic creation
         * fails; see the remark above. */
        if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
                                             topic_conf)) == NULL) {
            ERROR("write_kafka plugin: cannot create topic : %s\n",
                  rd_kafka_err2str(rd_kafka_errno2err(errno)));
            return errno;
        }

        rd_kafka_topic_conf_destroy(ctx->conf);
        ctx->conf = NULL;

        INFO ("write_kafka plugin: handle created for topic : %s", rd_kafka_topic_name(ctx->topic));
    }

    return(0);

} /* }}} int kafka_handle */
165 static int kafka_write(const data_set_t *ds, /* {{{ */
166 const value_list_t *vl,
167 user_data_t *ud)
168 {
169 int status = 0;
170 void *key;
171 size_t keylen = 0;
172 char buffer[8192];
173 size_t bfree = sizeof(buffer);
174 size_t bfill = 0;
175 size_t blen = 0;
176 struct kafka_topic_context *ctx = ud->data;
178 if ((ds == NULL) || (vl == NULL) || (ctx == NULL))
179 return EINVAL;
181 pthread_mutex_lock (&ctx->lock);
182 status = kafka_handle(ctx);
183 pthread_mutex_unlock (&ctx->lock);
184 if( status != 0 )
185 return status;
187 bzero(buffer, sizeof(buffer));
189 switch (ctx->format) {
190 case KAFKA_FORMAT_COMMAND:
191 status = cmd_create_putval(buffer, sizeof(buffer), ds, vl);
192 if (status != 0) {
193 ERROR("write_kafka plugin: cmd_create_putval failed with status %i.",
194 status);
195 return status;
196 }
197 blen = strlen(buffer);
198 break;
199 case KAFKA_FORMAT_JSON:
200 format_json_initialize(buffer, &bfill, &bfree);
201 format_json_value_list(buffer, &bfill, &bfree, ds, vl,
202 ctx->store_rates);
203 format_json_finalize(buffer, &bfill, &bfree);
204 blen = strlen(buffer);
205 break;
206 case KAFKA_FORMAT_GRAPHITE:
207 status = format_graphite(buffer, sizeof(buffer), ds, vl,
208 ctx->prefix, ctx->postfix, ctx->escape_char,
209 ctx->graphite_flags);
210 if (status != 0) {
211 ERROR("write_kafka plugin: format_graphite failed with status %i.",
212 status);
213 return status;
214 }
215 blen = strlen(buffer);
216 break;
217 default:
218 ERROR("write_kafka plugin: invalid format %i.", ctx->format);
219 return -1;
220 }
222 key = (ctx->key != NULL)
223 ? ctx->key
224 : kafka_random_key(KAFKA_RANDOM_KEY_BUFFER);
225 keylen = strlen (key);
227 rd_kafka_produce(ctx->topic, RD_KAFKA_PARTITION_UA,
228 RD_KAFKA_MSG_F_COPY, buffer, blen,
229 key, keylen, NULL);
231 return status;
232 } /* }}} int kafka_write */
234 static void kafka_topic_context_free(void *p) /* {{{ */
235 {
236 struct kafka_topic_context *ctx = p;
238 if (ctx == NULL)
239 return;
241 if (ctx->topic_name != NULL)
242 sfree(ctx->topic_name);
243 if (ctx->topic != NULL)
244 rd_kafka_topic_destroy(ctx->topic);
245 if (ctx->conf != NULL)
246 rd_kafka_topic_conf_destroy(ctx->conf);
247 if (ctx->kafka_conf != NULL)
248 rd_kafka_conf_destroy(ctx->kafka_conf);
249 if (ctx->kafka != NULL)
250 rd_kafka_destroy(ctx->kafka);
252 sfree(ctx);
253 } /* }}} void kafka_topic_context_free */
255 static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{ */
256 {
257 int status;
258 struct kafka_topic_context *tctx;
259 char *key = NULL;
260 char *val;
261 char callback_name[DATA_MAX_NAME_LEN];
262 char errbuf[1024];
263 oconfig_item_t *child;
264 rd_kafka_conf_res_t ret;
266 if ((tctx = calloc(1, sizeof (*tctx))) == NULL) {
267 ERROR ("write_kafka plugin: calloc failed.");
268 return;
269 }
271 tctx->escape_char = '.';
272 tctx->store_rates = 1;
273 tctx->format = KAFKA_FORMAT_JSON;
274 tctx->key = NULL;
276 if ((tctx->kafka_conf = rd_kafka_conf_dup(conf)) == NULL) {
277 sfree(tctx);
278 ERROR("write_kafka plugin: cannot allocate memory for kafka config");
279 return;
280 }
282 #ifdef HAVE_LIBRDKAFKA_LOG_CB
283 rd_kafka_conf_set_log_cb(tctx->kafka_conf, kafka_log);
284 #endif
286 if ((tctx->conf = rd_kafka_topic_conf_new()) == NULL) {
287 rd_kafka_conf_destroy(tctx->kafka_conf);
288 sfree(tctx);
289 ERROR ("write_kafka plugin: cannot create topic configuration.");
290 return;
291 }
293 if (ci->values_num != 1) {
294 WARNING("kafka topic name needed.");
295 goto errout;
296 }
298 if (ci->values[0].type != OCONFIG_TYPE_STRING) {
299 WARNING("kafka topic needs a string argument.");
300 goto errout;
301 }
303 if ((tctx->topic_name = strdup(ci->values[0].value.string)) == NULL) {
304 ERROR("write_kafka plugin: cannot copy topic name.");
305 goto errout;
306 }
308 for (int i = 0; i < ci->children_num; i++) {
309 /*
310 * The code here could be simplified but makes room
311 * for easy adding of new options later on.
312 */
313 child = &ci->children[i];
314 status = 0;
316 if (strcasecmp ("Property", child->key) == 0) {
317 if (child->values_num != 2) {
318 WARNING("kafka properties need both a key and a value.");
319 goto errout;
320 }
321 if (child->values[0].type != OCONFIG_TYPE_STRING ||
322 child->values[1].type != OCONFIG_TYPE_STRING) {
323 WARNING("kafka properties needs string arguments.");
324 goto errout;
325 }
326 key = child->values[0].value.string;
327 val = child->values[1].value.string;
328 ret = rd_kafka_topic_conf_set(tctx->conf,key, val,
329 errbuf, sizeof(errbuf));
330 if (ret != RD_KAFKA_CONF_OK) {
331 WARNING("cannot set kafka topic property %s to %s: %s.",
332 key, val, errbuf);
333 goto errout;
334 }
336 } else if (strcasecmp ("Key", child->key) == 0) {
337 if (cf_util_get_string (child, &tctx->key) != 0)
338 continue;
339 if (strcasecmp ("Random", tctx->key) == 0) {
340 sfree(tctx->key);
341 tctx->key = strdup (kafka_random_key (KAFKA_RANDOM_KEY_BUFFER));
342 }
343 } else if (strcasecmp ("Format", child->key) == 0) {
344 status = cf_util_get_string(child, &key);
345 if (status != 0)
346 goto errout;
348 assert(key != NULL);
350 if (strcasecmp(key, "Command") == 0) {
351 tctx->format = KAFKA_FORMAT_COMMAND;
353 } else if (strcasecmp(key, "Graphite") == 0) {
354 tctx->format = KAFKA_FORMAT_GRAPHITE;
356 } else if (strcasecmp(key, "Json") == 0) {
357 tctx->format = KAFKA_FORMAT_JSON;
359 } else {
360 WARNING ("write_kafka plugin: Invalid format string: %s",
361 key);
362 }
364 sfree(key);
366 } else if (strcasecmp ("StoreRates", child->key) == 0) {
367 status = cf_util_get_boolean (child, &tctx->store_rates);
368 (void) cf_util_get_flag (child, &tctx->graphite_flags,
369 GRAPHITE_STORE_RATES);
371 } else if (strcasecmp ("GraphiteSeparateInstances", child->key) == 0) {
372 status = cf_util_get_flag (child, &tctx->graphite_flags,
373 GRAPHITE_SEPARATE_INSTANCES);
375 } else if (strcasecmp ("GraphiteAlwaysAppendDS", child->key) == 0) {
376 status = cf_util_get_flag (child, &tctx->graphite_flags,
377 GRAPHITE_ALWAYS_APPEND_DS);
379 } else if (strcasecmp ("GraphitePreserveSeparator", child->key) == 0) {
380 status = cf_util_get_flag (child, &tctx->graphite_flags,
381 GRAPHITE_PRESERVE_SEPARATOR);
383 } else if (strcasecmp ("GraphitePrefix", child->key) == 0) {
384 status = cf_util_get_string (child, &tctx->prefix);
385 } else if (strcasecmp ("GraphitePostfix", child->key) == 0) {
386 status = cf_util_get_string (child, &tctx->postfix);
387 } else if (strcasecmp ("GraphiteEscapeChar", child->key) == 0) {
388 char *tmp_buff = NULL;
389 status = cf_util_get_string (child, &tmp_buff);
390 if (strlen (tmp_buff) > 1)
391 WARNING ("write_kafka plugin: The option \"GraphiteEscapeChar\" handles "
392 "only one character. Others will be ignored.");
393 tctx->escape_char = tmp_buff[0];
394 sfree (tmp_buff);
395 } else {
396 WARNING ("write_kafka plugin: Invalid directive: %s.", child->key);
397 }
399 if (status != 0)
400 break;
401 }
403 rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
404 rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);
406 ssnprintf(callback_name, sizeof(callback_name),
407 "write_kafka/%s", tctx->topic_name);
409 status = plugin_register_write (callback_name, kafka_write,
410 &(user_data_t) {
411 .data = tctx,
412 .free_func = kafka_topic_context_free,
413 });
414 if (status != 0) {
415 WARNING ("write_kafka plugin: plugin_register_write (\"%s\") "
416 "failed with status %i.",
417 callback_name, status);
418 goto errout;
419 }
421 pthread_mutex_init (&tctx->lock, /* attr = */ NULL);
423 return;
424 errout:
425 if (tctx->topic_name != NULL)
426 free(tctx->topic_name);
427 if (tctx->conf != NULL)
428 rd_kafka_topic_conf_destroy(tctx->conf);
429 if (tctx->kafka_conf != NULL)
430 rd_kafka_conf_destroy(tctx->kafka_conf);
431 sfree(tctx);
432 } /* }}} int kafka_config_topic */
434 static int kafka_config(oconfig_item_t *ci) /* {{{ */
435 {
436 oconfig_item_t *child;
437 rd_kafka_conf_t *conf;
438 rd_kafka_conf_res_t ret;
439 char errbuf[1024];
441 if ((conf = rd_kafka_conf_new()) == NULL) {
442 WARNING("cannot allocate kafka configuration.");
443 return -1;
444 }
445 for (int i = 0; i < ci->children_num; i++) {
446 child = &ci->children[i];
448 if (strcasecmp("Topic", child->key) == 0) {
449 kafka_config_topic (conf, child);
450 } else if (strcasecmp(child->key, "Property") == 0) {
451 char *key = NULL;
452 char *val = NULL;
454 if (child->values_num != 2) {
455 WARNING("kafka properties need both a key and a value.");
456 goto errout;
457 }
458 if (child->values[0].type != OCONFIG_TYPE_STRING ||
459 child->values[1].type != OCONFIG_TYPE_STRING) {
460 WARNING("kafka properties needs string arguments.");
461 goto errout;
462 }
463 if ((key = strdup(child->values[0].value.string)) == NULL) {
464 WARNING("cannot allocate memory for attribute key.");
465 goto errout;
466 }
467 if ((val = strdup(child->values[1].value.string)) == NULL) {
468 WARNING("cannot allocate memory for attribute value.");
469 sfree(key);
470 goto errout;
471 }
472 ret = rd_kafka_conf_set(conf, key, val, errbuf, sizeof(errbuf));
473 if (ret != RD_KAFKA_CONF_OK) {
474 WARNING("cannot set kafka property %s to %s: %s",
475 key, val, errbuf);
476 sfree(key);
477 sfree(val);
478 goto errout;
479 }
480 sfree(key);
481 sfree(val);
482 } else {
483 WARNING ("write_kafka plugin: Ignoring unknown "
484 "configuration option \"%s\" at top level.",
485 child->key);
486 }
487 }
488 if (conf != NULL)
489 rd_kafka_conf_destroy(conf);
490 return (0);
491 errout:
492 if (conf != NULL)
493 rd_kafka_conf_destroy(conf);
494 return -1;
495 } /* }}} int kafka_config */
/* Plugin entry point: registers the complex-config callback, which in turn
 * registers one write callback per configured <Topic> block. */
void module_register(void)
{
    plugin_register_complex_config ("write_kafka", kafka_config);
}