654db0a4afef495758215844d05fbb4597060714
1 /**
2 * collectd - src/write_kafka.c
3 * Copyright (C) 2014 Pierre-Yves Ritschard
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Pierre-Yves Ritschard <pyr at spootnik.org>
25 */
27 #include "collectd.h"
29 #include "plugin.h"
30 #include "common.h"
31 #include "utils_cmd_putval.h"
32 #include "utils_format_graphite.h"
33 #include "utils_format_json.h"
35 #include <stdint.h>
36 #include <librdkafka/rdkafka.h>
37 #include <errno.h>
/* Per-topic state for one registered write callback.  One instance is
 * allocated per <Topic> config block, handed to plugin_register_write()
 * as user data, and released by kafka_topic_context_free(). */
struct kafka_topic_context {
#define KAFKA_FORMAT_JSON     0
#define KAFKA_FORMAT_COMMAND  1
#define KAFKA_FORMAT_GRAPHITE 2
    uint8_t format;              /* one of the KAFKA_FORMAT_* values above */
    unsigned int graphite_flags; /* GRAPHITE_* option bits (graphite format) */
    _Bool store_rates;           /* convert counters to rates (JSON format) */
    rd_kafka_topic_conf_t *conf; /* topic config template; NULL once topic exists */
    rd_kafka_topic_t *topic;     /* lazily created in kafka_handle() */
    rd_kafka_conf_t *kafka_conf; /* producer config template; NULL once handle exists */
    rd_kafka_t *kafka;           /* producer handle, lazily created */
    char *key;                   /* fixed partition key, or NULL for per-message random */
    char *prefix;                /* graphite metric prefix (may be NULL) */
    char *postfix;               /* graphite metric postfix (may be NULL) */
    char escape_char;            /* graphite escape character, default '.' */
    char *topic_name;            /* kafka topic to produce to */
    pthread_mutex_t lock;        /* serializes lazy handle/topic creation */
};
58 static int kafka_handle(struct kafka_topic_context *);
59 static int kafka_write(const data_set_t *, const value_list_t *, user_data_t *);
60 static int32_t kafka_partition(const rd_kafka_topic_t *, const void *, size_t,
61 int32_t, void *, void *);
#if defined HAVE_LIBRDKAFKA_LOGGER || defined HAVE_LIBRDKAFKA_LOG_CB
static void kafka_log(const rd_kafka_t *, int, const char *, const char *);

/* Forward librdkafka log messages into collectd's logging facility.
 * NOTE(review): librdkafka's syslog-style level is passed straight
 * through as collectd's log level -- the scales appear compatible, but
 * confirm against plugin.h's LOG_* constants.  The handle and facility
 * arguments are intentionally unused. */
static void kafka_log(const rd_kafka_t *rkt, int level,
                      const char *fac, const char *msg)
{
    plugin_log(level, "%s", msg);
}
#endif
73 static uint32_t kafka_hash(const char *keydata, size_t keylen)
74 {
75 uint32_t hash = 5381;
76 for (; keylen > 0; keylen--)
77 hash = ((hash << 5) + hash) + keydata[keylen - 1];
78 return hash;
79 }
81 /* 31 bit -> 4 byte -> 8 byte hex string + null byte */
82 #define KAFKA_RANDOM_KEY_SIZE 9
83 #define KAFKA_RANDOM_KEY_BUFFER (char[KAFKA_RANDOM_KEY_SIZE]) {""}
84 static char *kafka_random_key(char buffer[static KAFKA_RANDOM_KEY_SIZE])
85 {
86 ssnprintf(buffer, KAFKA_RANDOM_KEY_SIZE, "%08lX", (unsigned long) mrand48());
87 return buffer;
88 }
90 static int32_t kafka_partition(const rd_kafka_topic_t *rkt,
91 const void *keydata, size_t keylen,
92 int32_t partition_cnt, void *p, void *m)
93 {
94 uint32_t key = kafka_hash(keydata, keylen);
95 uint32_t target = key % partition_cnt;
96 int32_t i = partition_cnt;
98 while (--i > 0 && !rd_kafka_topic_partition_available(rkt, target)) {
99 target = (target + 1) % partition_cnt;
100 }
101 return target;
102 }
104 static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
105 {
106 char errbuf[1024];
107 rd_kafka_conf_t *conf;
108 rd_kafka_topic_conf_t *topic_conf;
110 if (ctx->kafka != NULL && ctx->topic != NULL)
111 return(0);
113 if (ctx->kafka == NULL) {
114 if ((conf = rd_kafka_conf_dup(ctx->kafka_conf)) == NULL) {
115 ERROR("write_kafka plugin: cannot duplicate kafka config");
116 return(1);
117 }
119 if ((ctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
120 errbuf, sizeof(errbuf))) == NULL) {
121 ERROR("write_kafka plugin: cannot create kafka handle.");
122 return 1;
123 }
125 rd_kafka_conf_destroy(ctx->kafka_conf);
126 ctx->kafka_conf = NULL;
128 INFO ("write_kafka plugin: created KAFKA handle : %s", rd_kafka_name(ctx->kafka));
130 #if defined(HAVE_LIBRDKAFKA_LOGGER) && !defined(HAVE_LIBRDKAFKA_LOG_CB)
131 rd_kafka_set_logger(ctx->kafka, kafka_log);
132 #endif
133 }
135 if (ctx->topic == NULL ) {
136 if ((topic_conf = rd_kafka_topic_conf_dup(ctx->conf)) == NULL) {
137 ERROR("write_kafka plugin: cannot duplicate kafka topic config");
138 return 1;
139 }
141 if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
142 topic_conf)) == NULL) {
143 ERROR("write_kafka plugin: cannot create topic : %s\n",
144 rd_kafka_err2str(rd_kafka_errno2err(errno)));
145 return errno;
146 }
148 rd_kafka_topic_conf_destroy(ctx->conf);
149 ctx->conf = NULL;
151 INFO ("write_kafka plugin: handle created for topic : %s", rd_kafka_topic_name(ctx->topic));
152 }
154 return(0);
156 } /* }}} int kafka_handle */
158 static int kafka_write(const data_set_t *ds, /* {{{ */
159 const value_list_t *vl,
160 user_data_t *ud)
161 {
162 int status = 0;
163 void *key;
164 size_t keylen = 0;
165 char buffer[8192];
166 size_t bfree = sizeof(buffer);
167 size_t bfill = 0;
168 size_t blen = 0;
169 struct kafka_topic_context *ctx = ud->data;
171 if ((ds == NULL) || (vl == NULL) || (ctx == NULL))
172 return EINVAL;
174 pthread_mutex_lock (&ctx->lock);
175 status = kafka_handle(ctx);
176 pthread_mutex_unlock (&ctx->lock);
177 if( status != 0 )
178 return status;
180 bzero(buffer, sizeof(buffer));
182 switch (ctx->format) {
183 case KAFKA_FORMAT_COMMAND:
184 status = cmd_create_putval(buffer, sizeof(buffer), ds, vl);
185 if (status != 0) {
186 ERROR("write_kafka plugin: cmd_create_putval failed with status %i.",
187 status);
188 return status;
189 }
190 blen = strlen(buffer);
191 break;
192 case KAFKA_FORMAT_JSON:
193 format_json_initialize(buffer, &bfill, &bfree);
194 format_json_value_list(buffer, &bfill, &bfree, ds, vl,
195 ctx->store_rates);
196 format_json_finalize(buffer, &bfill, &bfree);
197 blen = strlen(buffer);
198 break;
199 case KAFKA_FORMAT_GRAPHITE:
200 status = format_graphite(buffer, sizeof(buffer), ds, vl,
201 ctx->prefix, ctx->postfix, ctx->escape_char,
202 ctx->graphite_flags);
203 if (status != 0) {
204 ERROR("write_kafka plugin: format_graphite failed with status %i.",
205 status);
206 return status;
207 }
208 blen = strlen(buffer);
209 break;
210 default:
211 ERROR("write_kafka plugin: invalid format %i.", ctx->format);
212 return -1;
213 }
215 key = (ctx->key != NULL)
216 ? ctx->key
217 : kafka_random_key(KAFKA_RANDOM_KEY_BUFFER);
218 keylen = strlen (key);
220 rd_kafka_produce(ctx->topic, RD_KAFKA_PARTITION_UA,
221 RD_KAFKA_MSG_F_COPY, buffer, blen,
222 key, keylen, NULL);
224 return status;
225 } /* }}} int kafka_write */
227 static void kafka_topic_context_free(void *p) /* {{{ */
228 {
229 struct kafka_topic_context *ctx = p;
231 if (ctx == NULL)
232 return;
234 if (ctx->topic_name != NULL)
235 sfree(ctx->topic_name);
236 if (ctx->topic != NULL)
237 rd_kafka_topic_destroy(ctx->topic);
238 if (ctx->conf != NULL)
239 rd_kafka_topic_conf_destroy(ctx->conf);
240 if (ctx->kafka_conf != NULL)
241 rd_kafka_conf_destroy(ctx->kafka_conf);
242 if (ctx->kafka != NULL)
243 rd_kafka_destroy(ctx->kafka);
245 sfree(ctx);
246 } /* }}} void kafka_topic_context_free */
248 static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{ */
249 {
250 int status;
251 struct kafka_topic_context *tctx;
252 char *key = NULL;
253 char *val;
254 char callback_name[DATA_MAX_NAME_LEN];
255 char errbuf[1024];
256 oconfig_item_t *child;
257 rd_kafka_conf_res_t ret;
259 if ((tctx = calloc(1, sizeof (*tctx))) == NULL) {
260 ERROR ("write_kafka plugin: calloc failed.");
261 return;
262 }
264 tctx->escape_char = '.';
265 tctx->store_rates = 1;
266 tctx->format = KAFKA_FORMAT_JSON;
267 tctx->key = NULL;
269 if ((tctx->kafka_conf = rd_kafka_conf_dup(conf)) == NULL) {
270 sfree(tctx);
271 ERROR("write_kafka plugin: cannot allocate memory for kafka config");
272 return;
273 }
275 #ifdef HAVE_LIBRDKAFKA_LOG_CB
276 rd_kafka_conf_set_log_cb(tctx->kafka_conf, kafka_log);
277 #endif
279 if ((tctx->conf = rd_kafka_topic_conf_new()) == NULL) {
280 rd_kafka_conf_destroy(tctx->kafka_conf);
281 sfree(tctx);
282 ERROR ("write_kafka plugin: cannot create topic configuration.");
283 return;
284 }
286 if (ci->values_num != 1) {
287 WARNING("kafka topic name needed.");
288 goto errout;
289 }
291 if (ci->values[0].type != OCONFIG_TYPE_STRING) {
292 WARNING("kafka topic needs a string argument.");
293 goto errout;
294 }
296 if ((tctx->topic_name = strdup(ci->values[0].value.string)) == NULL) {
297 ERROR("write_kafka plugin: cannot copy topic name.");
298 goto errout;
299 }
301 for (int i = 0; i < ci->children_num; i++) {
302 /*
303 * The code here could be simplified but makes room
304 * for easy adding of new options later on.
305 */
306 child = &ci->children[i];
307 status = 0;
309 if (strcasecmp ("Property", child->key) == 0) {
310 if (child->values_num != 2) {
311 WARNING("kafka properties need both a key and a value.");
312 goto errout;
313 }
314 if (child->values[0].type != OCONFIG_TYPE_STRING ||
315 child->values[1].type != OCONFIG_TYPE_STRING) {
316 WARNING("kafka properties needs string arguments.");
317 goto errout;
318 }
319 key = child->values[0].value.string;
320 val = child->values[1].value.string;
321 ret = rd_kafka_topic_conf_set(tctx->conf,key, val,
322 errbuf, sizeof(errbuf));
323 if (ret != RD_KAFKA_CONF_OK) {
324 WARNING("cannot set kafka topic property %s to %s: %s.",
325 key, val, errbuf);
326 goto errout;
327 }
329 } else if (strcasecmp ("Key", child->key) == 0) {
330 if (cf_util_get_string (child, &tctx->key) != 0)
331 continue;
332 if (strcasecmp ("Random", tctx->key) == 0) {
333 sfree(tctx->key);
334 tctx->key = strdup (kafka_random_key (KAFKA_RANDOM_KEY_BUFFER));
335 }
336 } else if (strcasecmp ("Format", child->key) == 0) {
337 status = cf_util_get_string(child, &key);
338 if (status != 0)
339 goto errout;
341 assert(key != NULL);
343 if (strcasecmp(key, "Command") == 0) {
344 tctx->format = KAFKA_FORMAT_COMMAND;
346 } else if (strcasecmp(key, "Graphite") == 0) {
347 tctx->format = KAFKA_FORMAT_GRAPHITE;
349 } else if (strcasecmp(key, "Json") == 0) {
350 tctx->format = KAFKA_FORMAT_JSON;
352 } else {
353 WARNING ("write_kafka plugin: Invalid format string: %s",
354 key);
355 }
357 sfree(key);
359 } else if (strcasecmp ("StoreRates", child->key) == 0) {
360 status = cf_util_get_boolean (child, &tctx->store_rates);
361 (void) cf_util_get_flag (child, &tctx->graphite_flags,
362 GRAPHITE_STORE_RATES);
364 } else if (strcasecmp ("GraphiteSeparateInstances", child->key) == 0) {
365 status = cf_util_get_flag (child, &tctx->graphite_flags,
366 GRAPHITE_SEPARATE_INSTANCES);
368 } else if (strcasecmp ("GraphiteAlwaysAppendDS", child->key) == 0) {
369 status = cf_util_get_flag (child, &tctx->graphite_flags,
370 GRAPHITE_ALWAYS_APPEND_DS);
372 } else if (strcasecmp ("GraphitePrefix", child->key) == 0) {
373 status = cf_util_get_string (child, &tctx->prefix);
374 } else if (strcasecmp ("GraphitePostfix", child->key) == 0) {
375 status = cf_util_get_string (child, &tctx->postfix);
376 } else if (strcasecmp ("GraphiteEscapeChar", child->key) == 0) {
377 char *tmp_buff = NULL;
378 status = cf_util_get_string (child, &tmp_buff);
379 if (strlen (tmp_buff) > 1)
380 WARNING ("write_kafka plugin: The option \"GraphiteEscapeChar\" handles "
381 "only one character. Others will be ignored.");
382 tctx->escape_char = tmp_buff[0];
383 sfree (tmp_buff);
384 } else {
385 WARNING ("write_kafka plugin: Invalid directive: %s.", child->key);
386 }
388 if (status != 0)
389 break;
390 }
392 rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
393 rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);
395 ssnprintf(callback_name, sizeof(callback_name),
396 "write_kafka/%s", tctx->topic_name);
398 status = plugin_register_write (callback_name, kafka_write,
399 &(user_data_t) {
400 .data = tctx,
401 .free_func = kafka_topic_context_free,
402 });
403 if (status != 0) {
404 WARNING ("write_kafka plugin: plugin_register_write (\"%s\") "
405 "failed with status %i.",
406 callback_name, status);
407 goto errout;
408 }
410 pthread_mutex_init (&tctx->lock, /* attr = */ NULL);
412 return;
413 errout:
414 if (tctx->topic_name != NULL)
415 free(tctx->topic_name);
416 if (tctx->conf != NULL)
417 rd_kafka_topic_conf_destroy(tctx->conf);
418 if (tctx->kafka_conf != NULL)
419 rd_kafka_conf_destroy(tctx->kafka_conf);
420 sfree(tctx);
421 } /* }}} int kafka_config_topic */
423 static int kafka_config(oconfig_item_t *ci) /* {{{ */
424 {
425 oconfig_item_t *child;
426 rd_kafka_conf_t *conf;
427 rd_kafka_conf_res_t ret;
428 char errbuf[1024];
430 if ((conf = rd_kafka_conf_new()) == NULL) {
431 WARNING("cannot allocate kafka configuration.");
432 return -1;
433 }
434 for (int i = 0; i < ci->children_num; i++) {
435 child = &ci->children[i];
437 if (strcasecmp("Topic", child->key) == 0) {
438 kafka_config_topic (conf, child);
439 } else if (strcasecmp(child->key, "Property") == 0) {
440 char *key = NULL;
441 char *val = NULL;
443 if (child->values_num != 2) {
444 WARNING("kafka properties need both a key and a value.");
445 goto errout;
446 }
447 if (child->values[0].type != OCONFIG_TYPE_STRING ||
448 child->values[1].type != OCONFIG_TYPE_STRING) {
449 WARNING("kafka properties needs string arguments.");
450 goto errout;
451 }
452 if ((key = strdup(child->values[0].value.string)) == NULL) {
453 WARNING("cannot allocate memory for attribute key.");
454 goto errout;
455 }
456 if ((val = strdup(child->values[1].value.string)) == NULL) {
457 WARNING("cannot allocate memory for attribute value.");
458 sfree(key);
459 goto errout;
460 }
461 ret = rd_kafka_conf_set(conf, key, val, errbuf, sizeof(errbuf));
462 if (ret != RD_KAFKA_CONF_OK) {
463 WARNING("cannot set kafka property %s to %s: %s",
464 key, val, errbuf);
465 sfree(key);
466 sfree(val);
467 goto errout;
468 }
469 sfree(key);
470 sfree(val);
471 } else {
472 WARNING ("write_kafka plugin: Ignoring unknown "
473 "configuration option \"%s\" at top level.",
474 child->key);
475 }
476 }
477 if (conf != NULL)
478 rd_kafka_conf_destroy(conf);
479 return (0);
480 errout:
481 if (conf != NULL)
482 rd_kafka_conf_destroy(conf);
483 return -1;
484 } /* }}} int kafka_config */
/* Plugin entry point: register the complex-config callback.  Write
 * callbacks are registered per topic from within kafka_config(). */
void module_register(void)
{
    plugin_register_complex_config ("write_kafka", kafka_config);
}