1 /*****************************************************************************
2 * RRDtool 1.4.5 Copyright by Tobi Oetiker, 1997-2010
3 *****************************************************************************
4 * rrd_open.c Open an RRD File
5 *****************************************************************************
6 * $Id$
7 *****************************************************************************/
9 #include "rrd_tool.h"
10 #include "unused.h"
12 #ifdef WIN32
13 #include <stdlib.h>
14 #include <fcntl.h>
15 #include <sys/stat.h>
16 #endif
18 #ifdef HAVE_BROKEN_MS_ASYNC
19 #include <sys/types.h>
20 #include <utime.h>
21 #endif
23 #define MEMBLK 8192
25 #ifdef WIN32
26 #define _LK_UNLCK 0 /* Unlock */
27 #define _LK_LOCK 1 /* Lock */
28 #define _LK_NBLCK 2 /* Non-blocking lock */
29 #define _LK_RLCK 3 /* Lock for read only */
30 #define _LK_NBRLCK 4 /* Non-blocking lock for read only */
33 #define LK_UNLCK _LK_UNLCK
34 #define LK_LOCK _LK_LOCK
35 #define LK_NBLCK _LK_NBLCK
36 #define LK_RLCK _LK_RLCK
37 #define LK_NBRLCK _LK_NBRLCK
38 #endif
40 /* DEBUG 2 prints information obtained via mincore(2) */
41 #define DEBUG 1
42 /* do not calculate exact madvise hints but assume 1 page for headers and
43 * set DONTNEED for the rest, which is assumed to be data */
/* Avoid calling madvise on areas that were already hinted. May be beneficial if
 * your syscalls are very slow */
47 #ifdef HAVE_MMAP
48 /* the cast to void* is there to avoid this warning seen on ia64 with certain
49 versions of gcc: 'cast increases required alignment of target type'
50 */
51 #define __rrd_read(dst, dst_t, cnt) { \
52 size_t wanted = sizeof(dst_t)*(cnt); \
53 if (offset + wanted > rrd_file->file_len) { \
54 rrd_set_error("reached EOF while loading header " #dst); \
55 goto out_nullify_head; \
56 } \
57 (dst) = (dst_t*)(void*) (data + offset); \
58 offset += wanted; \
59 }
60 #else
61 #define __rrd_read(dst, dst_t, cnt) { \
62 size_t wanted = sizeof(dst_t)*(cnt); \
63 size_t got; \
64 if ((dst = (dst_t*)malloc(wanted)) == NULL) { \
65 rrd_set_error(#dst " malloc"); \
66 goto out_nullify_head; \
67 } \
68 got = read (rrd_simple_file->fd, dst, wanted); \
69 if (got != wanted) { \
70 rrd_set_error("short read while reading header " #dst); \
71 goto out_nullify_head; \
72 } \
73 offset += got; \
74 }
75 #endif
77 /* get the address of the start of this page */
78 #if defined USE_MADVISE || defined HAVE_POSIX_FADVISE
79 #ifndef PAGE_START
80 #define PAGE_START(addr) ((addr)&(~(_page_size-1)))
81 #endif
82 #endif
84 /* Open a database file, return its header and an open filehandle,
85 * positioned to the first cdp in the first rra.
86 * In the error path of rrd_open, only rrd_free(&rrd) has to be called
87 * before returning an error. Do not call rrd_close upon failure of rrd_open.
88 * If creating a new file, the parameter rrd must be initialised with
89 * details of the file content.
 * If opening an existing file, then rrd must be initialised by calling
 * rrd_init(rrd) prior to invoking rrd_open
92 */
94 rrd_file_t *rrd_open(
95 const char *const file_name,
96 rrd_t *rrd,
97 unsigned rdwr)
98 {
99 unsigned long ui;
100 int flags = 0;
101 int version;
103 #ifdef HAVE_MMAP
104 ssize_t _page_size = sysconf(_SC_PAGESIZE);
105 char *data = MAP_FAILED;
106 #endif
107 off_t offset = 0;
108 struct stat statb;
109 rrd_file_t *rrd_file = NULL;
110 rrd_simple_file_t *rrd_simple_file = NULL;
111 size_t newfile_size = 0;
112 size_t header_len, value_cnt, data_len;
114 /* Are we creating a new file? */
115 if((rdwr & RRD_CREAT) && (rrd->stat_head != NULL))
116 {
117 header_len = rrd_get_header_size(rrd);
119 value_cnt = 0;
120 for (ui = 0; ui < rrd->stat_head->rra_cnt; ui++)
121 value_cnt += rrd->stat_head->ds_cnt * rrd->rra_def[ui].row_cnt;
123 data_len = sizeof(rrd_value_t) * value_cnt;
125 newfile_size = header_len + data_len;
126 }
128 rrd_file = (rrd_file_t*)malloc(sizeof(rrd_file_t));
129 if (rrd_file == NULL) {
130 rrd_set_error("allocating rrd_file descriptor for '%s'", file_name);
131 return NULL;
132 }
133 memset(rrd_file, 0, sizeof(rrd_file_t));
135 rrd_file->pvt = malloc(sizeof(rrd_simple_file_t));
136 if(rrd_file->pvt == NULL) {
137 rrd_set_error("allocating rrd_simple_file for '%s'", file_name);
138 return NULL;
139 }
140 memset(rrd_file->pvt, 0, sizeof(rrd_simple_file_t));
141 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
143 #ifdef DEBUG
144 if ((rdwr & (RRD_READONLY | RRD_READWRITE)) ==
145 (RRD_READONLY | RRD_READWRITE)) {
146 /* Both READONLY and READWRITE were given, which is invalid. */
147 rrd_set_error("in read/write request mask");
148 exit(-1);
149 }
150 #endif
152 #ifdef HAVE_MMAP
153 rrd_simple_file->mm_prot = PROT_READ;
154 rrd_simple_file->mm_flags = 0;
155 #endif
157 if (rdwr & RRD_READONLY) {
158 flags |= O_RDONLY;
159 #ifdef HAVE_MMAP
160 # if !defined(AIX)
161 rrd_simple_file->mm_flags = MAP_PRIVATE;
162 # endif
163 # ifdef MAP_NORESERVE
164 rrd_simple_file->mm_flags |= MAP_NORESERVE; /* readonly, so no swap backing needed */
165 # endif
166 #endif
167 } else {
168 if (rdwr & RRD_READWRITE) {
169 flags |= O_RDWR;
170 #ifdef HAVE_MMAP
171 rrd_simple_file->mm_flags = MAP_SHARED;
172 rrd_simple_file->mm_prot |= PROT_WRITE;
173 #endif
174 }
175 if (rdwr & RRD_CREAT) {
176 flags |= (O_CREAT | O_TRUNC);
177 }
178 if (rdwr & RRD_EXCL) {
179 flags |= O_EXCL;
180 }
181 }
182 if (rdwr & RRD_READAHEAD) {
183 #ifdef MAP_POPULATE
184 rrd_simple_file->mm_flags |= MAP_POPULATE; /* populate ptes and data */
185 #endif
186 #if defined MAP_NONBLOCK
187 rrd_simple_file->mm_flags |= MAP_NONBLOCK; /* just populate ptes */
188 #endif
189 }
190 #if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
191 flags |= O_BINARY;
192 #endif
194 if ((rrd_simple_file->fd = open(file_name, flags, 0666)) < 0) {
195 rrd_set_error("opening '%s': %s", file_name, rrd_strerror(errno));
196 goto out_free;
197 }
199 #ifdef HAVE_MMAP
200 #ifdef HAVE_BROKEN_MS_ASYNC
201 if (rdwr & RRD_READWRITE) {
202 /* some unices, the files mtime does not get update
203 on msync MS_ASYNC, in order to help them,
204 we update the the timestamp at this point.
205 The thing happens pretty 'close' to the open
206 call so the chances of a race should be minimal.
208 Maybe ask your vendor to fix your OS ... */
209 utime(file_name,NULL);
210 }
211 #endif
212 #endif
214 /* Better try to avoid seeks as much as possible. stat may be heavy but
215 * many concurrent seeks are even worse. */
216 if (newfile_size == 0 && ((fstat(rrd_simple_file->fd, &statb)) < 0)) {
217 rrd_set_error("fstat '%s': %s", file_name, rrd_strerror(errno));
218 goto out_close;
219 }
220 if (newfile_size == 0) {
221 rrd_file->file_len = statb.st_size;
222 } else {
223 rrd_file->file_len = newfile_size;
224 lseek(rrd_simple_file->fd, newfile_size - 1, SEEK_SET);
225 if ( write(rrd_simple_file->fd, "\0", 1) == -1){ /* poke */
226 rrd_set_error("write '%s': %s", file_name, rrd_strerror(errno));
227 goto out_close;
228 }
229 lseek(rrd_simple_file->fd, 0, SEEK_SET);
230 }
231 #ifdef HAVE_POSIX_FADVISE
232 /* In general we need no read-ahead when dealing with rrd_files.
233 When we stop reading, it is highly unlikely that we start up again.
234 In this manner we actually save time and diskaccess (and buffer cache).
235 Thanks to Dave Plonka for the Idea of using POSIX_FADV_RANDOM here. */
236 posix_fadvise(rrd_simple_file->fd, 0, 0, POSIX_FADV_RANDOM);
237 #endif
239 /*
240 if (rdwr & RRD_READWRITE)
241 {
242 if (setvbuf((rrd_simple_file->fd),NULL,_IONBF,2)) {
243 rrd_set_error("failed to disable the stream buffer\n");
244 return (-1);
245 }
246 }
247 */
249 #ifdef HAVE_MMAP
250 data = mmap(0, rrd_file->file_len,
251 rrd_simple_file->mm_prot, rrd_simple_file->mm_flags,
252 rrd_simple_file->fd, offset);
254 /* lets see if the first read worked */
255 if (data == MAP_FAILED) {
256 rrd_set_error("mmaping file '%s': %s", file_name,
257 rrd_strerror(errno));
258 goto out_close;
259 }
260 rrd_simple_file->file_start = data;
261 if (rdwr & RRD_CREAT) {
262 memset(data, DNAN, newfile_size - 1);
263 goto out_done;
264 }
265 #endif
266 if (rdwr & RRD_CREAT)
267 goto out_done;
268 #ifdef USE_MADVISE
269 if (rdwr & RRD_COPY) {
270 /* We will read everything in a moment (copying) */
271 madvise(data, rrd_file->file_len, MADV_WILLNEED );
272 madvise(data, rrd_file->file_len, MADV_SEQUENTIAL );
273 } else {
274 /* We do not need to read anything in for the moment */
275 madvise(data, rrd_file->file_len, MADV_RANDOM);
276 /* the stat_head will be needed soonish, so hint accordingly */
277 madvise(data, sizeof(stat_head_t), MADV_WILLNEED);
278 madvise(data, sizeof(stat_head_t), MADV_RANDOM);
279 }
280 #endif
282 __rrd_read(rrd->stat_head, stat_head_t,
283 1);
285 /* lets do some test if we are on track ... */
286 if (memcmp(rrd->stat_head->cookie, RRD_COOKIE, sizeof(RRD_COOKIE)) != 0) {
287 rrd_set_error("'%s' is not an RRD file", file_name);
288 goto out_nullify_head;
289 }
291 if (rrd->stat_head->float_cookie != FLOAT_COOKIE) {
292 rrd_set_error("This RRD was created on another architecture");
293 goto out_nullify_head;
294 }
296 version = atoi(rrd->stat_head->version);
298 if (version > atoi(RRD_VERSION)) {
299 rrd_set_error("can't handle RRD file version %s",
300 rrd->stat_head->version);
301 goto out_nullify_head;
302 }
303 #if defined USE_MADVISE
304 /* the ds_def will be needed soonish, so hint accordingly */
305 madvise(data + PAGE_START(offset),
306 sizeof(ds_def_t) * rrd->stat_head->ds_cnt, MADV_WILLNEED);
307 #endif
308 __rrd_read(rrd->ds_def, ds_def_t,
309 rrd->stat_head->ds_cnt);
311 #if defined USE_MADVISE
312 /* the rra_def will be needed soonish, so hint accordingly */
313 madvise(data + PAGE_START(offset),
314 sizeof(rra_def_t) * rrd->stat_head->rra_cnt, MADV_WILLNEED);
315 #endif
316 __rrd_read(rrd->rra_def, rra_def_t,
317 rrd->stat_head->rra_cnt);
319 /* handle different format for the live_head */
320 if (version < 3) {
321 rrd->live_head = (live_head_t *) malloc(sizeof(live_head_t));
322 if (rrd->live_head == NULL) {
323 rrd_set_error("live_head_t malloc");
324 goto out_close;
325 }
326 #if defined USE_MADVISE
327 /* the live_head will be needed soonish, so hint accordingly */
328 madvise(data + PAGE_START(offset), sizeof(time_t), MADV_WILLNEED);
329 #endif
330 __rrd_read(rrd->legacy_last_up, time_t,
331 1);
333 rrd->live_head->last_up = *rrd->legacy_last_up;
334 rrd->live_head->last_up_usec = 0;
335 } else {
336 #if defined USE_MADVISE
337 /* the live_head will be needed soonish, so hint accordingly */
338 madvise(data + PAGE_START(offset),
339 sizeof(live_head_t), MADV_WILLNEED);
340 #endif
341 __rrd_read(rrd->live_head, live_head_t,
342 1);
343 }
344 __rrd_read(rrd->pdp_prep, pdp_prep_t,
345 rrd->stat_head->ds_cnt);
346 __rrd_read(rrd->cdp_prep, cdp_prep_t,
347 rrd->stat_head->rra_cnt * rrd->stat_head->ds_cnt);
348 __rrd_read(rrd->rra_ptr, rra_ptr_t,
349 rrd->stat_head->rra_cnt);
351 rrd_file->header_len = offset;
352 rrd_file->pos = offset;
354 {
355 unsigned long row_cnt = 0;
357 for (ui=0; ui<rrd->stat_head->rra_cnt; ui++)
358 row_cnt += rrd->rra_def[ui].row_cnt;
360 size_t correct_len = rrd_file->header_len +
361 sizeof(rrd_value_t) * row_cnt * rrd->stat_head->ds_cnt;
363 if (correct_len > rrd_file->file_len)
364 {
365 rrd_set_error("'%s' is too small (should be %ld bytes)",
366 file_name, (long long) correct_len);
367 goto out_nullify_head;
368 }
369 }
371 out_done:
372 return (rrd_file);
373 out_nullify_head:
374 rrd->stat_head = NULL;
375 out_close:
376 #ifdef HAVE_MMAP
377 if (data != MAP_FAILED)
378 munmap(data, rrd_file->file_len);
379 #endif
381 close(rrd_simple_file->fd);
382 out_free:
383 free(rrd_file->pvt);
384 free(rrd_file);
385 return NULL;
386 }
#if defined DEBUG && DEBUG > 1
/* Print the runs of in-core / not-in-core pages of the current rrd_file
 * to stderr, tagged with 'mark' (e.g. "before"/"after"). Debug aid only. */
static
void mincore_print(
    rrd_file_t *rrd_file,
    char *mark)
{
    rrd_simple_file_t *rrd_simple_file;
    rrd_simple_file = (rrd_simple_file_t *) rrd_file->pvt;
#ifdef HAVE_MMAP
    /* pretty print blocks in core */
    size_t    off;
    size_t    npages;
    unsigned char *vec;
    ssize_t   _page_size = sysconf(_SC_PAGESIZE);

    /* mincore(2) fills ONE status byte per page; the old code sized the
     * vector as file_len + pages and then walked it per byte, which
     * misread the page-granular data (and leaked vec). */
    npages = (rrd_file->file_len + _page_size - 1) / _page_size;
    vec = malloc(npages);
    if (vec != NULL) {
        memset(vec, 0, npages);
        if (mincore(rrd_simple_file->file_start, rrd_file->file_len, vec) == 0) {
            size_t    prev;
            unsigned  is_in = 0, was_in = 0;

            for (off = 0, prev = 0; off < npages; ++off) {
                is_in = vec[off] & 1;   /* if lsb set then page is core resident */
                if (off == 0)
                    was_in = is_in;
                if (was_in != is_in) {
                    /* %zu for size_t operands; the old %ld was a
                     * mismatched conversion */
                    fprintf(stderr, "%s: %sin core: %p len %zu\n", mark,
                            was_in ? "" : "not ", (void *) (vec + prev),
                            off - prev);
                    was_in = is_in;
                    prev = off;
                }
            }
            fprintf(stderr,
                    "%s: %sin core: %p len %zu\n", mark,
                    was_in ? "" : "not ", (void *) (vec + prev), off - prev);
        } else
            fprintf(stderr, "mincore: %s", rrd_strerror(errno));
        free(vec);      /* was leaked before */
    }
#else
    fprintf(stderr, "sorry mincore only works with mmap");
#endif
}
#endif                          /* defined DEBUG && DEBUG > 1 */
/*
 * Get an exclusive (write) lock on the whole file.
 * The lock is released automatically when the file is closed.
 *
 * returns 0 on success, non-zero (platform-specific) on failure
 */
int rrd_lock(
    rrd_file_t *rrd_file)
{
    int       rcstat;
    rrd_simple_file_t *rrd_simple_file;
    rrd_simple_file = (rrd_simple_file_t *) rrd_file->pvt;

    {
#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
        struct _stat st;

        if (_fstat(rrd_simple_file->fd, &st) == 0) {
            /* non-blocking lock over the file's current size */
            rcstat = _locking(rrd_simple_file->fd, _LK_NBLCK, st.st_size);
        } else {
            rcstat = -1;
        }
#else
        struct flock lock;

        lock.l_type = F_WRLCK;    /* exclusive write lock */
        lock.l_len = 0;           /* zero length means: until end of file */
        lock.l_start = 0;         /* start of file */
        lock.l_whence = SEEK_SET; /* l_start is relative to start of file */

        /* F_SETLK: fail immediately instead of blocking if already locked */
        rcstat = fcntl(rrd_simple_file->fd, F_SETLK, &lock);
#endif
    }

    return (rcstat);
}
474 /* drop cache except for the header and the active pages */
475 void rrd_dontneed(
476 rrd_file_t *rrd_file,
477 rrd_t *rrd)
478 {
479 rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
480 #if defined USE_MADVISE || defined HAVE_POSIX_FADVISE
481 size_t dontneed_start;
482 size_t rra_start;
483 size_t active_block;
484 size_t i;
485 ssize_t _page_size = sysconf(_SC_PAGESIZE);
487 if (rrd_file == NULL) {
488 #if defined DEBUG && DEBUG
489 fprintf (stderr, "rrd_dontneed: Argument 'rrd_file' is NULL.\n");
490 #endif
491 return;
492 }
494 #if defined DEBUG && DEBUG > 1
495 mincore_print(rrd_file, "before");
496 #endif
498 /* ignoring errors from RRDs that are smaller then the file_len+rounding */
499 rra_start = rrd_file->header_len;
500 dontneed_start = PAGE_START(rra_start) + _page_size;
501 for (i = 0; i < rrd->stat_head->rra_cnt; ++i) {
502 active_block =
503 PAGE_START(rra_start
504 + rrd->rra_ptr[i].cur_row
505 * rrd->stat_head->ds_cnt * sizeof(rrd_value_t));
506 if (active_block > dontneed_start) {
507 #ifdef USE_MADVISE
508 madvise(rrd_simple_file->file_start + dontneed_start,
509 active_block - dontneed_start - 1, MADV_DONTNEED);
510 #endif
511 /* in linux at least only fadvise DONTNEED seems to purge pages from cache */
512 #ifdef HAVE_POSIX_FADVISE
513 posix_fadvise(rrd_simple_file->fd, dontneed_start,
514 active_block - dontneed_start - 1,
515 POSIX_FADV_DONTNEED);
516 #endif
517 }
518 dontneed_start = active_block;
519 /* do not release 'hot' block if update for this RAA will occur
520 * within 10 minutes */
521 if (rrd->stat_head->pdp_step * rrd->rra_def[i].pdp_cnt -
522 rrd->live_head->last_up % (rrd->stat_head->pdp_step *
523 rrd->rra_def[i].pdp_cnt) < 10 * 60) {
524 dontneed_start += _page_size;
525 }
526 rra_start +=
527 rrd->rra_def[i].row_cnt * rrd->stat_head->ds_cnt *
528 sizeof(rrd_value_t);
529 }
531 if (dontneed_start < rrd_file->file_len) {
532 #ifdef USE_MADVISE
533 madvise(rrd_simple_file->file_start + dontneed_start,
534 rrd_file->file_len - dontneed_start, MADV_DONTNEED);
535 #endif
536 #ifdef HAVE_POSIX_FADVISE
537 posix_fadvise(rrd_simple_file->fd, dontneed_start,
538 rrd_file->file_len - dontneed_start,
539 POSIX_FADV_DONTNEED);
540 #endif
541 }
543 #if defined DEBUG && DEBUG > 1
544 mincore_print(rrd_file, "after");
545 #endif
546 #endif /* without madvise and posix_fadvise it does not make much sense todo anything */
547 }
553 int rrd_close(
554 rrd_file_t *rrd_file)
555 {
556 rrd_simple_file_t *rrd_simple_file;
557 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
558 int ret;
560 #ifdef HAVE_MMAP
561 ret = msync(rrd_simple_file->file_start, rrd_file->file_len, MS_ASYNC);
562 if (ret != 0)
563 rrd_set_error("msync rrd_file: %s", rrd_strerror(errno));
564 ret = munmap(rrd_simple_file->file_start, rrd_file->file_len);
565 if (ret != 0)
566 rrd_set_error("munmap rrd_file: %s", rrd_strerror(errno));
567 #endif
568 ret = close(rrd_simple_file->fd);
569 if (ret != 0)
570 rrd_set_error("closing file: %s", rrd_strerror(errno));
571 free(rrd_file->pvt);
572 free(rrd_file);
573 rrd_file = NULL;
574 return ret;
575 }
578 /* Set position of rrd_file. */
580 off_t rrd_seek(
581 rrd_file_t *rrd_file,
582 off_t off,
583 int whence)
584 {
585 off_t ret = 0;
586 rrd_simple_file_t *rrd_simple_file;
587 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
589 #ifdef HAVE_MMAP
590 if (whence == SEEK_SET)
591 rrd_file->pos = off;
592 else if (whence == SEEK_CUR)
593 rrd_file->pos += off;
594 else if (whence == SEEK_END)
595 rrd_file->pos = rrd_file->file_len + off;
596 #else
597 ret = lseek(rrd_simple_file->fd, off, whence);
598 if (ret < 0)
599 rrd_set_error("lseek: %s", rrd_strerror(errno));
600 rrd_file->pos = ret;
601 #endif
602 /* mimic fseek, which returns 0 upon success */
603 return ret < 0; /*XXX: or just ret to mimic lseek */
604 }
/* Get current position in rrd_file (byte offset from the start of the
 * file, as maintained by rrd_seek/rrd_read/rrd_write). */

off_t rrd_tell(
    rrd_file_t *rrd_file)
{
    return rrd_file->pos;
}
/* Read count bytes into buffer buf, starting at rrd_file->pos.
 * Returns the number of bytes read (possibly short at EOF, 0 at/after
 * EOF) or <0 on error.  Advances rrd_file->pos like read(2). */

ssize_t rrd_read(
    rrd_file_t *rrd_file,
    void *buf,
    size_t count)
{
    rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *) rrd_file->pvt;
#ifdef HAVE_MMAP
    size_t    _cnt = count;
    ssize_t   _surplus;

    if (rrd_file->pos > rrd_file->file_len || _cnt == 0)    /* EOF */
        return 0;
    if (buf == NULL)
        return -1;      /* EINVAL */
    /* how far a full-length read would run past the end of the mapping */
    _surplus = rrd_file->pos + _cnt - rrd_file->file_len;
    if (_surplus > 0) { /* short read */
        _cnt -= _surplus;
    }
    if (_cnt == 0)
        return 0;       /* EOF */
    buf = memcpy(buf, rrd_simple_file->file_start + rrd_file->pos, _cnt);

    rrd_file->pos += _cnt;  /* mimic read() semantics */
    return _cnt;
#else
    ssize_t   ret;

    ret = read(rrd_simple_file->fd, buf, count);
    if (ret > 0)
        rrd_file->pos += ret;   /* mimic read() semantics */
    return ret;
#endif
}
/* Write count bytes from buffer buf at the current position
 * rrd_file->pos of rrd_simple_file->fd.
 * Returns the number of bytes written or <0 on error.
 * In the mmap case the file cannot grow: writing past file_len fails. */

ssize_t rrd_write(
    rrd_file_t *rrd_file,
    const void *buf,
    size_t count)
{
    rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *) rrd_file->pvt;
#ifdef HAVE_MMAP
    size_t    old_size = rrd_file->file_len;

    if (count == 0)
        return 0;
    if (buf == NULL)
        return -1;      /* EINVAL */

    /* the fixed-size mapping cannot be extended */
    if ((rrd_file->pos + count) > old_size)
    {
        rrd_set_error("attempting to write beyond end of file");
        return -1;
    }
    memcpy(rrd_simple_file->file_start + rrd_file->pos, buf, count);
    rrd_file->pos += count;
    return count;       /* mimic write() semantics */
#else
    ssize_t   _sz = write(rrd_simple_file->fd, buf, count);

    if (_sz > 0)
        rrd_file->pos += _sz;
    return _sz;
#endif
}
/* This is a leftover from the old days; it serves no purpose
   and is therefore turned into a no-op (kept only for ABI/API
   compatibility with callers that still invoke it). */
void rrd_flush(
    rrd_file_t UNUSED(*rrd_file))
{
}
/* Initialize RRD header: NULL out every section pointer so that
 * rrd_open()/rrd_free() can tell which parts have been populated.
 * Must be called before rrd_open() on an existing file. */

void rrd_init(
    rrd_t *rrd)
{
    rrd->stat_head = NULL;
    rrd->ds_def = NULL;
    rrd->rra_def = NULL;
    rrd->live_head = NULL;
    rrd->legacy_last_up = NULL;
    rrd->rra_ptr = NULL;
    rrd->pdp_prep = NULL;
    rrd->cdp_prep = NULL;
    rrd->rrd_value = NULL;
}
/* free RRD header data. */

#ifdef HAVE_MMAP
/* With mmap the header structures point straight into the mapping and
 * are released by munmap() in rrd_close(); the only heap allocation is
 * the live_head synthesized for pre-version-3 files (see rrd_open). */
void rrd_free(
    rrd_t *rrd)
{
    if (rrd->legacy_last_up) {  /* this gets set for version < 3 only */
        free(rrd->live_head);
    }
}
#else
/* Without mmap every header section was malloc()ed individually by
 * __rrd_read(), so release them all here.  free(NULL) is a safe no-op,
 * so partially initialised structures are handled too. */
void rrd_free(
    rrd_t *rrd)
{
    free(rrd->live_head);
    free(rrd->stat_head);
    free(rrd->ds_def);
    free(rrd->rra_def);
    free(rrd->rra_ptr);
    free(rrd->pdp_prep);
    free(rrd->cdp_prep);
    free(rrd->rrd_value);
}
#endif
/* Routine used by external libraries/bindings to free memory that was
 * allocated by the rrd library (keeps allocator and deallocator in the
 * same DLL/so on platforms where mixing heaps is unsafe). */

void rrd_freemem(
    void *mem)
{
    free(mem);
}
/*
 * rra_update informs us about the RRAs being updated.
 * The low level storage API may use this information for
 * aligning RRAs within stripes, or other performance enhancements.
 * This simple-file backend has no use for it, hence the empty body.
 */
void rrd_notify_row(
    rrd_file_t UNUSED(*rrd_file),
    int UNUSED(rra_idx),
    unsigned long UNUSED(rra_row),
    time_t UNUSED(rra_time))
{
}
/*
 * This function is called when creating a new RRD.
 * The storage implementation can use this opportunity to select
 * a sensible starting row within the file.
 * The default implementation is random, to ensure that all RRAs
 * don't change to a new disk block at the same time.
 */
unsigned long rrd_select_initial_row(
    rrd_file_t UNUSED(*rrd_file),
    int UNUSED(rra_idx),
    rra_def_t *rra
    )
{
    /* NOTE(review): the modulo introduces a slight bias for row counts
     * that don't divide the RNG range; harmless for this purpose. */
    return rrd_random() % rra->row_cnt;
}