1 /*****************************************************************************
2 * RRDtool 1.3.2 Copyright by Tobi Oetiker, 1997-2008
3 *****************************************************************************
4 * rrd_open.c Open an RRD File
5 *****************************************************************************
6 * $Id$
7 *****************************************************************************/
9 #include <stdlib.h>
10 #include <fcntl.h>
11 #include <sys/stat.h>
12 #include <utime.h>
14 #include "rrd_tool.h"
15 #include "unused.h"
16 #define MEMBLK 8192
18 #ifdef WIN32
19 # define random() rand()
20 # define srandom(x) srand(x)
21 # define getpid() 0
23 #define _LK_UNLCK 0 /* Unlock */
24 #define _LK_LOCK 1 /* Lock */
25 #define _LK_NBLCK 2 /* Non-blocking lock */
26 #define _LK_RLCK 3 /* Lock for read only */
27 #define _LK_NBRLCK 4 /* Non-blocking lock for read only */
30 #define LK_UNLCK _LK_UNLCK
31 #define LK_LOCK _LK_LOCK
32 #define LK_NBLCK _LK_NBLCK
33 #define LK_RLCK _LK_RLCK
34 #define LK_NBRLCK _LK_NBRLCK
35 #endif
37 /* DEBUG 2 prints information obtained via mincore(2) */
38 #define DEBUG 1
39 /* do not calculate exact madvise hints but assume 1 page for headers and
40 * set DONTNEED for the rest, which is assumed to be data */
/* Avoid calling madvise on areas that were already hinted. May be beneficial if
 * your syscalls are very slow */
44 #ifdef HAVE_MMAP
45 /* the cast to void* is there to avoid this warning seen on ia64 with certain
46 versions of gcc: 'cast increases required alignment of target type'
47 */
48 #define __rrd_read(dst, dst_t, cnt) { \
49 size_t wanted = sizeof(dst_t)*(cnt); \
50 if (offset + wanted > rrd_file->file_len) { \
51 rrd_set_error("reached EOF while loading header " #dst); \
52 goto out_nullify_head; \
53 } \
54 (dst) = (dst_t*)(void*) (data + offset); \
55 offset += wanted; \
56 }
57 #else
58 #define __rrd_read(dst, dst_t, cnt) { \
59 size_t wanted = sizeof(dst_t)*(cnt); \
60 size_t got; \
61 if ((dst = (dst_t*)malloc(wanted)) == NULL) { \
62 rrd_set_error(#dst " malloc"); \
63 goto out_nullify_head; \
64 } \
65 got = read (rrd_simple_file->fd, dst, wanted); \
66 if (got != wanted) { \
67 rrd_set_error("short read while reading header " #dst); \
68 goto out_nullify_head; \
69 } \
70 offset += got; \
71 }
72 #endif
74 /* get the address of the start of this page */
75 #if defined USE_MADVISE || defined HAVE_POSIX_FADVISE
76 #ifndef PAGE_START
77 #define PAGE_START(addr) ((addr)&(~(_page_size-1)))
78 #endif
79 #endif
81 long int rra_random_row(
82 rra_def_t *);
85 /* Open a database file, return its header and an open filehandle,
86 * positioned to the first cdp in the first rra.
87 * In the error path of rrd_open, only rrd_free(&rrd) has to be called
88 * before returning an error. Do not call rrd_close upon failure of rrd_open.
89 * If creating a new file, the parameter rrd must be initialised with
90 * details of the file content.
91 * If opening an existing file, then use rrd must be initialised by
92 * rrd_init(rrd) prior to invoking rrd_open
93 */
95 rrd_file_t *rrd_open(
96 const char *const file_name,
97 rrd_t *rrd,
98 unsigned rdwr)
99 {
100 unsigned long ui;
101 int flags = 0;
102 int version;
104 #ifdef HAVE_MMAP
105 ssize_t _page_size = sysconf(_SC_PAGESIZE);
106 char *data = MAP_FAILED;
107 #endif
108 off_t offset = 0;
109 struct stat statb;
110 rrd_file_t *rrd_file = NULL;
111 rrd_simple_file_t *rrd_simple_file = NULL;
112 size_t newfile_size = 0;
113 size_t header_len, value_cnt, data_len;
115 /* Are we creating a new file? */
116 if((rdwr & RRD_CREAT) && (rrd->stat_head != NULL))
117 {
118 header_len = \
119 sizeof(stat_head_t) + \
120 sizeof(ds_def_t) * rrd->stat_head->ds_cnt + \
121 sizeof(rra_def_t) * rrd->stat_head->rra_cnt + \
122 sizeof(time_t) + \
123 sizeof(live_head_t) + \
124 sizeof(pdp_prep_t) * rrd->stat_head->ds_cnt + \
125 sizeof(cdp_prep_t) * rrd->stat_head->ds_cnt * rrd->stat_head->rra_cnt + \
126 sizeof(rra_ptr_t) * rrd->stat_head->rra_cnt;
128 value_cnt = 0;
129 for (ui = 0; ui < rrd->stat_head->rra_cnt; ui++)
130 value_cnt += rrd->stat_head->ds_cnt * rrd->rra_def[ui].row_cnt;
132 data_len = sizeof(rrd_value_t) * value_cnt;
134 newfile_size = header_len + data_len;
135 }
137 rrd_file = (rrd_file_t*)malloc(sizeof(rrd_file_t));
138 if (rrd_file == NULL) {
139 rrd_set_error("allocating rrd_file descriptor for '%s'", file_name);
140 return NULL;
141 }
142 memset(rrd_file, 0, sizeof(rrd_file_t));
144 rrd_file->pvt = malloc(sizeof(rrd_simple_file_t));
145 if(rrd_file->pvt == NULL) {
146 rrd_set_error("allocating rrd_simple_file for '%s'", file_name);
147 return NULL;
148 }
149 memset(rrd_file->pvt, 0, sizeof(rrd_simple_file_t));
150 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
152 #ifdef DEBUG
153 if ((rdwr & (RRD_READONLY | RRD_READWRITE)) ==
154 (RRD_READONLY | RRD_READWRITE)) {
155 /* Both READONLY and READWRITE were given, which is invalid. */
156 rrd_set_error("in read/write request mask");
157 exit(-1);
158 }
159 #endif
161 #ifdef HAVE_MMAP
162 rrd_simple_file->mm_prot = PROT_READ;
163 rrd_simple_file->mm_flags = 0;
164 #endif
166 if (rdwr & RRD_READONLY) {
167 flags |= O_RDONLY;
168 #ifdef HAVE_MMAP
169 rrd_simple_file->mm_flags = MAP_PRIVATE;
170 # ifdef MAP_NORESERVE
171 rrd_simple_file->mm_flags |= MAP_NORESERVE; /* readonly, so no swap backing needed */
172 # endif
173 #endif
174 } else {
175 if (rdwr & RRD_READWRITE) {
176 flags |= O_RDWR;
177 #ifdef HAVE_MMAP
178 rrd_simple_file->mm_flags = MAP_SHARED;
179 rrd_simple_file->mm_prot |= PROT_WRITE;
180 #endif
181 }
182 if (rdwr & RRD_CREAT) {
183 flags |= (O_CREAT | O_TRUNC);
184 }
185 }
186 if (rdwr & RRD_READAHEAD) {
187 #ifdef MAP_POPULATE
188 rrd_simple_file->mm_flags |= MAP_POPULATE; /* populate ptes and data */
189 #endif
190 #if defined MAP_NONBLOCK
191 rrd_simple_file->mm_flags |= MAP_NONBLOCK; /* just populate ptes */
192 #endif
193 }
194 #if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
195 flags |= O_BINARY;
196 #endif
198 if ((rrd_simple_file->fd = open(file_name, flags, 0666)) < 0) {
199 rrd_set_error("opening '%s': %s", file_name, rrd_strerror(errno));
200 goto out_free;
201 }
203 #ifdef HAVE_MMAP
204 #ifdef HAVE_BROKEN_MS_ASYNC
205 if (rdwr & RRD_READWRITE) {
206 /* some unices, the files mtime does not get update
207 on msync MS_ASYNC, in order to help them,
208 we update the the timestamp at this point.
209 The thing happens pretty 'close' to the open
210 call so the chances of a race should be minimal.
212 Maybe ask your vendor to fix your OS ... */
213 utime(file_name,NULL);
214 }
215 #endif
216 #endif
218 /* Better try to avoid seeks as much as possible. stat may be heavy but
219 * many concurrent seeks are even worse. */
220 if (newfile_size == 0 && ((fstat(rrd_simple_file->fd, &statb)) < 0)) {
221 rrd_set_error("fstat '%s': %s", file_name, rrd_strerror(errno));
222 goto out_close;
223 }
224 if (newfile_size == 0) {
225 rrd_file->file_len = statb.st_size;
226 } else {
227 rrd_file->file_len = newfile_size;
228 lseek(rrd_simple_file->fd, newfile_size - 1, SEEK_SET);
229 write(rrd_simple_file->fd, "\0", 1); /* poke */
230 lseek(rrd_simple_file->fd, 0, SEEK_SET);
231 }
232 #ifdef HAVE_POSIX_FADVISE
233 /* In general we need no read-ahead when dealing with rrd_files.
234 When we stop reading, it is highly unlikely that we start up again.
235 In this manner we actually save time and diskaccess (and buffer cache).
236 Thanks to Dave Plonka for the Idea of using POSIX_FADV_RANDOM here. */
237 posix_fadvise(rrd_simple_file->fd, 0, 0, POSIX_FADV_RANDOM);
238 #endif
240 /*
241 if (rdwr & RRD_READWRITE)
242 {
243 if (setvbuf((rrd_simple_file->fd),NULL,_IONBF,2)) {
244 rrd_set_error("failed to disable the stream buffer\n");
245 return (-1);
246 }
247 }
248 */
250 #ifdef HAVE_MMAP
251 data = mmap(0, rrd_file->file_len,
252 rrd_simple_file->mm_prot, rrd_simple_file->mm_flags,
253 rrd_simple_file->fd, offset);
255 /* lets see if the first read worked */
256 if (data == MAP_FAILED) {
257 rrd_set_error("mmaping file '%s': %s", file_name,
258 rrd_strerror(errno));
259 goto out_close;
260 }
261 rrd_simple_file->file_start = data;
262 if (rdwr & RRD_CREAT) {
263 memset(data, DNAN, newfile_size - 1);
264 goto out_done;
265 }
266 #endif
267 if (rdwr & RRD_CREAT)
268 goto out_done;
269 #ifdef USE_MADVISE
270 if (rdwr & RRD_COPY) {
271 /* We will read everything in a moment (copying) */
272 madvise(data, rrd_file->file_len, MADV_WILLNEED | MADV_SEQUENTIAL);
273 } else {
274 /* We do not need to read anything in for the moment */
275 madvise(data, rrd_file->file_len, MADV_RANDOM);
276 /* the stat_head will be needed soonish, so hint accordingly */
277 madvise(data, sizeof(stat_head_t), MADV_WILLNEED | MADV_RANDOM);
278 }
279 #endif
281 __rrd_read(rrd->stat_head, stat_head_t,
282 1);
284 /* lets do some test if we are on track ... */
285 if (memcmp(rrd->stat_head->cookie, RRD_COOKIE, sizeof(RRD_COOKIE)) != 0) {
286 rrd_set_error("'%s' is not an RRD file", file_name);
287 goto out_nullify_head;
288 }
290 if (rrd->stat_head->float_cookie != FLOAT_COOKIE) {
291 rrd_set_error("This RRD was created on another architecture");
292 goto out_nullify_head;
293 }
295 version = atoi(rrd->stat_head->version);
297 if (version > atoi(RRD_VERSION)) {
298 rrd_set_error("can't handle RRD file version %s",
299 rrd->stat_head->version);
300 goto out_nullify_head;
301 }
302 #if defined USE_MADVISE
303 /* the ds_def will be needed soonish, so hint accordingly */
304 madvise(data + PAGE_START(offset),
305 sizeof(ds_def_t) * rrd->stat_head->ds_cnt, MADV_WILLNEED);
306 #endif
307 __rrd_read(rrd->ds_def, ds_def_t,
308 rrd->stat_head->ds_cnt);
310 #if defined USE_MADVISE
311 /* the rra_def will be needed soonish, so hint accordingly */
312 madvise(data + PAGE_START(offset),
313 sizeof(rra_def_t) * rrd->stat_head->rra_cnt, MADV_WILLNEED);
314 #endif
315 __rrd_read(rrd->rra_def, rra_def_t,
316 rrd->stat_head->rra_cnt);
318 /* handle different format for the live_head */
319 if (version < 3) {
320 rrd->live_head = (live_head_t *) malloc(sizeof(live_head_t));
321 if (rrd->live_head == NULL) {
322 rrd_set_error("live_head_t malloc");
323 goto out_close;
324 }
325 #if defined USE_MADVISE
326 /* the live_head will be needed soonish, so hint accordingly */
327 madvise(data + PAGE_START(offset), sizeof(time_t), MADV_WILLNEED);
328 #endif
329 __rrd_read(rrd->legacy_last_up, time_t,
330 1);
332 rrd->live_head->last_up = *rrd->legacy_last_up;
333 rrd->live_head->last_up_usec = 0;
334 } else {
335 #if defined USE_MADVISE
336 /* the live_head will be needed soonish, so hint accordingly */
337 madvise(data + PAGE_START(offset),
338 sizeof(live_head_t), MADV_WILLNEED);
339 #endif
340 __rrd_read(rrd->live_head, live_head_t,
341 1);
342 }
343 __rrd_read(rrd->pdp_prep, pdp_prep_t,
344 rrd->stat_head->ds_cnt);
345 __rrd_read(rrd->cdp_prep, cdp_prep_t,
346 rrd->stat_head->rra_cnt * rrd->stat_head->ds_cnt);
347 __rrd_read(rrd->rra_ptr, rra_ptr_t,
348 rrd->stat_head->rra_cnt);
350 rrd_file->header_len = offset;
351 rrd_file->pos = offset;
353 {
354 unsigned long row_cnt = 0;
356 for (ui=0; ui<rrd->stat_head->rra_cnt; ui++)
357 row_cnt += rrd->rra_def[ui].row_cnt;
359 size_t correct_len = rrd_file->header_len +
360 sizeof(rrd_value_t) * row_cnt * rrd->stat_head->ds_cnt;
362 if (correct_len > rrd_file->file_len)
363 {
364 rrd_set_error("'%s' is too small (should be %ld bytes)",
365 file_name, (long long) correct_len);
366 goto out_nullify_head;
367 }
368 }
370 out_done:
371 return (rrd_file);
372 out_nullify_head:
373 rrd->stat_head = NULL;
374 out_close:
375 #ifdef HAVE_MMAP
376 if (data != MAP_FAILED)
377 munmap(data, rrd_file->file_len);
378 #endif
380 close(rrd_simple_file->fd);
381 out_free:
382 free(rrd_file->pvt);
383 free(rrd_file);
384 return NULL;
385 }
#if defined DEBUG && DEBUG > 1
/* Print the list of in-core pages of the given rrd_file to stderr.
 * Debug-only helper (compiled when DEBUG > 1); 'mark' is a caller-supplied
 * tag that prefixes every output line. */
static
void mincore_print(
    rrd_file_t *rrd_file,
    char *mark)
{
    rrd_simple_file_t *rrd_simple_file;
    rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
#ifdef HAVE_MMAP
    /* pretty print blocks in core */
    size_t    off;
    unsigned char *vec;
    ssize_t   _page_size = sysconf(_SC_PAGESIZE);

    /* NOTE(review): this allocates file_len + page_count bytes although
     * mincore(2) needs only one byte per page, and the loop below walks
     * the vector per byte rather than per page.  Kept as-is to preserve
     * the existing debug output -- verify against mincore(2) semantics. */
    off = rrd_file->file_len +
        ((rrd_file->file_len + _page_size - 1) / _page_size);
    vec = malloc(off);
    if (vec != NULL) {
        memset(vec, 0, off);
        if (mincore(rrd_simple_file->file_start, rrd_file->file_len, vec) == 0) {
            int       prev;
            unsigned  is_in = 0, was_in = 0;

            for (off = 0, prev = 0; off < rrd_file->file_len; ++off) {
                is_in = vec[off] & 1;   /* if lsb set then is core resident */
                if (off == 0)
                    was_in = is_in;
                if (was_in != is_in) {
                    /* BUGFIX: the length is a size_t, print with %zu
                     * (was %ld, wrong on LLP64 platforms). */
                    fprintf(stderr, "%s: %sin core: %p len %zu\n", mark,
                            was_in ? "" : "not ", vec + prev,
                            (size_t) (off - prev));
                    was_in = is_in;
                    prev = off;
                }
            }
            fprintf(stderr,
                    "%s: %sin core: %p len %zu\n", mark,
                    was_in ? "" : "not ", vec + prev, (size_t) (off - prev));
        } else
            fprintf(stderr, "mincore: %s", rrd_strerror(errno));
        free(vec);      /* BUGFIX: vec was leaked before */
    }
#else
    fprintf(stderr, "sorry mincore only works with mmap");
#endif
}
#endif                          /* defined DEBUG && DEBUG > 1 */
/*
 * Get an exclusive (write) lock on the whole file backing rrd_file.
 * The lock is released automatically when the file descriptor is closed.
 *
 * returns 0 on success, non-zero on failure
 */
int rrd_lock(
    rrd_file_t *rrd_file)
{
    int       rcstat;
    rrd_simple_file_t *rrd_simple_file;
    rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;

    {
#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
        struct _stat st;

        if (_fstat(rrd_simple_file->fd, &st) == 0) {
            /* non-blocking lock of the file's current length */
            rcstat = _locking(rrd_simple_file->fd, _LK_NBLCK, st.st_size);
        } else {
            rcstat = -1;
        }
#else
        struct flock lock;

        lock.l_type = F_WRLCK;  /* exclusive write lock */
        lock.l_len = 0;     /* whole file */
        lock.l_start = 0;   /* start of file */
        lock.l_whence = SEEK_SET;   /* offset relative to start of file */

        /* F_SETLK is non-blocking: fails at once if someone holds the lock */
        rcstat = fcntl(rrd_simple_file->fd, F_SETLK, &lock);
#endif
    }

    return (rcstat);
}
473 /* drop cache except for the header and the active pages */
474 void rrd_dontneed(
475 rrd_file_t *rrd_file,
476 rrd_t *rrd)
477 {
478 rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
479 #if defined USE_MADVISE || defined HAVE_POSIX_FADVISE
480 size_t dontneed_start;
481 size_t rra_start;
482 size_t active_block;
483 unsigned long i;
484 ssize_t _page_size = sysconf(_SC_PAGESIZE);
486 if (rrd_file == NULL) {
487 #if defined DEBUG && DEBUG
488 fprintf (stderr, "rrd_dontneed: Argument 'rrd_file' is NULL.\n");
489 #endif
490 return;
491 }
493 #if defined DEBUG && DEBUG > 1
494 mincore_print(rrd_file, "before");
495 #endif
497 /* ignoring errors from RRDs that are smaller then the file_len+rounding */
498 rra_start = rrd_file->header_len;
499 dontneed_start = PAGE_START(rra_start) + _page_size;
500 for (i = 0; i < rrd->stat_head->rra_cnt; ++i) {
501 active_block =
502 PAGE_START(rra_start
503 + rrd->rra_ptr[i].cur_row
504 * rrd->stat_head->ds_cnt * sizeof(rrd_value_t));
505 if (active_block > dontneed_start) {
506 #ifdef USE_MADVISE
507 madvise(rrd_simple_file->file_start + dontneed_start,
508 active_block - dontneed_start - 1, MADV_DONTNEED);
509 #endif
510 /* in linux at least only fadvise DONTNEED seems to purge pages from cache */
511 #ifdef HAVE_POSIX_FADVISE
512 posix_fadvise(rrd_simple_file->fd, dontneed_start,
513 active_block - dontneed_start - 1,
514 POSIX_FADV_DONTNEED);
515 #endif
516 }
517 dontneed_start = active_block;
518 /* do not release 'hot' block if update for this RAA will occur
519 * within 10 minutes */
520 if (rrd->stat_head->pdp_step * rrd->rra_def[i].pdp_cnt -
521 rrd->live_head->last_up % (rrd->stat_head->pdp_step *
522 rrd->rra_def[i].pdp_cnt) < 10 * 60) {
523 dontneed_start += _page_size;
524 }
525 rra_start +=
526 rrd->rra_def[i].row_cnt * rrd->stat_head->ds_cnt *
527 sizeof(rrd_value_t);
528 }
530 if (dontneed_start < rrd_file->file_len) {
531 #ifdef USE_MADVISE
532 madvise(rrd_simple_file->file_start + dontneed_start,
533 rrd_file->file_len - dontneed_start, MADV_DONTNEED);
534 #endif
535 #ifdef HAVE_POSIX_FADVISE
536 posix_fadvise(rrd_simple_file->fd, dontneed_start,
537 rrd_file->file_len - dontneed_start,
538 POSIX_FADV_DONTNEED);
539 #endif
540 }
542 #if defined DEBUG && DEBUG > 1
543 mincore_print(rrd_file, "after");
544 #endif
545 #endif /* without madvise and posix_fadvise ist does not make much sense todo anything */
546 }
552 int rrd_close(
553 rrd_file_t *rrd_file)
554 {
555 rrd_simple_file_t *rrd_simple_file;
556 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
557 int ret;
559 #ifdef HAVE_MMAP
560 ret = msync(rrd_simple_file->file_start, rrd_file->file_len, MS_ASYNC);
561 if (ret != 0)
562 rrd_set_error("msync rrd_file: %s", rrd_strerror(errno));
563 ret = munmap(rrd_simple_file->file_start, rrd_file->file_len);
564 if (ret != 0)
565 rrd_set_error("munmap rrd_file: %s", rrd_strerror(errno));
566 #endif
567 ret = close(rrd_simple_file->fd);
568 if (ret != 0)
569 rrd_set_error("closing file: %s", rrd_strerror(errno));
570 free(rrd_file->pvt);
571 free(rrd_file);
572 rrd_file = NULL;
573 return ret;
574 }
577 /* Set position of rrd_file. */
579 off_t rrd_seek(
580 rrd_file_t *rrd_file,
581 off_t off,
582 int whence)
583 {
584 off_t ret = 0;
585 rrd_simple_file_t *rrd_simple_file;
586 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
588 #ifdef HAVE_MMAP
589 if (whence == SEEK_SET)
590 rrd_file->pos = off;
591 else if (whence == SEEK_CUR)
592 rrd_file->pos += off;
593 else if (whence == SEEK_END)
594 rrd_file->pos = rrd_file->file_len + off;
595 #else
596 ret = lseek(rrd_simple_file->fd, off, whence);
597 if (ret < 0)
598 rrd_set_error("lseek: %s", rrd_strerror(errno));
599 rrd_file->pos = ret;
600 #endif
601 /* mimic fseek, which returns 0 upon success */
602 return ret < 0; /*XXX: or just ret to mimic lseek */
603 }
/* Get current position in rrd_file: the byte offset from the start of
 * the file at which the next rrd_read/rrd_write will operate. */

off_t rrd_tell(
    rrd_file_t *rrd_file)
{
    return rrd_file->pos;
}
/* Read count bytes into buffer buf, starting at rrd_file->pos.
 * Returns the number of bytes read (possibly fewer than requested near
 * end of file), 0 at EOF, or <0 on error.  Advances rrd_file->pos by
 * the number of bytes actually read, mimicking read(2). */

ssize_t rrd_read(
    rrd_file_t *rrd_file,
    void *buf,
    size_t count)
{
    rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
#ifdef HAVE_MMAP
    size_t    _cnt = count;
    ssize_t   _surplus;

    if (rrd_file->pos > rrd_file->file_len || _cnt == 0)    /* EOF */
        return 0;
    if (buf == NULL)
        return -1;      /* EINVAL */
    /* how far would this read run past the end of the mapping? */
    _surplus = rrd_file->pos + _cnt - rrd_file->file_len;
    if (_surplus > 0) { /* short read */
        _cnt -= _surplus;
    }
    if (_cnt == 0)
        return 0;       /* EOF */
    /* copy straight out of the mapping -- no syscall involved */
    buf = memcpy(buf, rrd_simple_file->file_start + rrd_file->pos, _cnt);

    rrd_file->pos += _cnt;  /* mimic read() semantics */
    return _cnt;
#else
    ssize_t   ret;

    ret = read(rrd_simple_file->fd, buf, count);
    if (ret > 0)
        rrd_file->pos += ret;   /* mimic read() semantics */
    return ret;
#endif
}
/* Write count bytes from buffer buf to the current position
 * rrd_file->pos of rrd_simple_file->fd.
 * Returns the number of bytes written or <0 on error.  In the mmap
 * build the file is fixed-size, so writing past the end is rejected. */

ssize_t rrd_write(
    rrd_file_t *rrd_file,
    const void *buf,
    size_t count)
{
    rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
#ifdef HAVE_MMAP
    size_t old_size = rrd_file->file_len;
    if (count == 0)
        return 0;
    if (buf == NULL)
        return -1;      /* EINVAL */

    /* the mapping cannot grow: refuse writes that would run past it */
    if((rrd_file->pos + count) > old_size)
    {
        rrd_set_error("attempting to write beyond end of file");
        return -1;
    }
    memcpy(rrd_simple_file->file_start + rrd_file->pos, buf, count);
    rrd_file->pos += count;
    return count;       /* mimic write() semantics */
#else
    ssize_t   _sz = write(rrd_simple_file->fd, buf, count);

    if (_sz > 0)
        rrd_file->pos += _sz;
    return _sz;
#endif
}
/* Flush all data pending to be written to the file descriptor backing
 * rrd_file.  Sets the rrd error string if fdatasync fails.  No-op on
 * WIN32, which lacks fdatasync(). */

void rrd_flush(
    rrd_file_t *rrd_file)
{
#ifndef WIN32
    rrd_simple_file_t *rrd_simple_file;
    rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
    if (fdatasync(rrd_simple_file->fd) != 0) {
        rrd_set_error("flushing fd %d: %s", rrd_simple_file->fd,
                      rrd_strerror(errno));
    }
#endif
}
704 /* Initialize RRD header. */
706 void rrd_init(
707 rrd_t *rrd)
708 {
709 rrd->stat_head = NULL;
710 rrd->ds_def = NULL;
711 rrd->rra_def = NULL;
712 rrd->live_head = NULL;
713 rrd->legacy_last_up = NULL;
714 rrd->rra_ptr = NULL;
715 rrd->pdp_prep = NULL;
716 rrd->cdp_prep = NULL;
717 rrd->rrd_value = NULL;
718 }
/* free RRD header data. */

#ifdef HAVE_MMAP
/* With mmap the header members point straight into the mapping and are
 * released by munmap in rrd_close; only the live_head that rrd_open
 * malloc'ed for pre-version-3 files must be freed here. */
void rrd_free(
    rrd_t *rrd)
{
    if (rrd->legacy_last_up) {  /* this gets set for version < 3 only */
        free(rrd->live_head);
    }
}
#else
/* Without mmap every header member was malloc'ed by __rrd_read and must
 * be freed individually.  free(NULL) is a safe no-op, so an rrd_init'ed
 * but never-opened struct is handled correctly. */
void rrd_free(
    rrd_t *rrd)
{
    free(rrd->live_head);
    free(rrd->stat_head);
    free(rrd->ds_def);
    free(rrd->rra_def);
    free(rrd->rra_ptr);
    free(rrd->pdp_prep);
    free(rrd->cdp_prep);
    free(rrd->rrd_value);
}
#endif
/* routine used by external libraries to free memory allocated by
 * rrd library (keeps allocation and deallocation in the same module,
 * which matters when the library and caller use different heaps) */

void rrd_freemem(
    void *mem)
{
    free(mem);
}
/*
 * rra_update informs us about the RRAs being updated
 * The low level storage API may use this information for
 * aligning RRAs within stripes, or other performance enhancements.
 * This simple-file backend intentionally does nothing with it.
 */
void rrd_notify_row(
    rrd_file_t *rrd_file __attribute__((unused)),
    int rra_idx __attribute__((unused)),
    unsigned long rra_row __attribute__((unused)),
    time_t rra_time __attribute__((unused)))
{
}
/*
 * This function is called when creating a new RRD
 * The storage implementation can use this opportunity to select
 * a sensible starting row within the file.
 * The default implementation is random, to ensure that all RRAs
 * don't change to a new disk block at the same time
 */
unsigned long rrd_select_initial_row(
    rrd_file_t *rrd_file __attribute__((unused)),
    int rra_idx __attribute__((unused)),
    rra_def_t *rra
    )
{
    /* delegate to the file-local PRNG helper */
    return rra_random_row(rra);
}
785 static int rand_init = 0;
787 long int rra_random_row(
788 rra_def_t *rra)
789 {
790 if (!rand_init) {
791 srandom((unsigned int) time(NULL) + (unsigned int) getpid());
792 rand_init++;
793 }
795 return random() % rra->row_cnt;
796 }