1 /*****************************************************************************
2 * RRDtool 1.4.3 Copyright by Tobi Oetiker, 1997-2010
3 *****************************************************************************
4 * rrd_open.c Open an RRD File
5 *****************************************************************************
6 * $Id$
7 *****************************************************************************/
9 #include "rrd_tool.h"
10 #include "unused.h"
12 #ifdef WIN32
13 #include <stdlib.h>
14 #include <fcntl.h>
15 #include <sys/stat.h>
16 #endif
19 #ifdef HAVE_BROKEN_MS_ASYNC
20 #include <sys/types.h>
21 #include <utime.h>
22 #endif
24 #define MEMBLK 8192
26 #ifdef WIN32
27 #define _LK_UNLCK 0 /* Unlock */
28 #define _LK_LOCK 1 /* Lock */
29 #define _LK_NBLCK 2 /* Non-blocking lock */
30 #define _LK_RLCK 3 /* Lock for read only */
31 #define _LK_NBRLCK 4 /* Non-blocking lock for read only */
34 #define LK_UNLCK _LK_UNLCK
35 #define LK_LOCK _LK_LOCK
36 #define LK_NBLCK _LK_NBLCK
37 #define LK_RLCK _LK_RLCK
38 #define LK_NBRLCK _LK_NBRLCK
39 #endif
41 /* DEBUG 2 prints information obtained via mincore(2) */
42 #define DEBUG 1
43 /* do not calculate exact madvise hints but assume 1 page for headers and
44 * set DONTNEED for the rest, which is assumed to be data */
/* Avoid calling madvise on areas that were already hinted. May be beneficial
 * if your syscalls are very slow */
#ifdef HAVE_MMAP
/* the cast to void* is there to avoid this warning seen on ia64 with certain
   versions of gcc: 'cast increases required alignment of target type'
*/
/* __rrd_read (mmap flavour): point dst at cnt consecutive dst_t elements
 * inside the mapped file at the current offset (zero-copy), after checking
 * that the span still fits within rrd_file->file_len; advances offset.
 * Expects locals `offset', `rrd_file', `data' and a label
 * `out_nullify_head' in the invoking function. */
#define __rrd_read(dst, dst_t, cnt) { \
	size_t wanted = sizeof(dst_t)*(cnt); \
	if (offset + wanted > rrd_file->file_len) { \
		rrd_set_error("reached EOF while loading header " #dst); \
		goto out_nullify_head; \
	} \
	(dst) = (dst_t*)(void*) (data + offset); \
	offset += wanted; \
	}
#else
/* __rrd_read (read(2) flavour): malloc cnt dst_t elements and fill them
 * from rrd_simple_file->fd; advances offset. Jumps to `out_nullify_head'
 * on allocation failure or short read. */
#define __rrd_read(dst, dst_t, cnt) { \
	size_t wanted = sizeof(dst_t)*(cnt); \
	size_t got; \
	if ((dst = (dst_t*)malloc(wanted)) == NULL) { \
		rrd_set_error(#dst " malloc"); \
		goto out_nullify_head; \
	} \
	got = read (rrd_simple_file->fd, dst, wanted); \
	if (got != wanted) { \
		rrd_set_error("short read while reading header " #dst); \
		goto out_nullify_head; \
	} \
	offset += got; \
	}
#endif
78 /* get the address of the start of this page */
79 #if defined USE_MADVISE || defined HAVE_POSIX_FADVISE
80 #ifndef PAGE_START
81 #define PAGE_START(addr) ((addr)&(~(_page_size-1)))
82 #endif
83 #endif
85 /* Open a database file, return its header and an open filehandle,
86 * positioned to the first cdp in the first rra.
87 * In the error path of rrd_open, only rrd_free(&rrd) has to be called
88 * before returning an error. Do not call rrd_close upon failure of rrd_open.
89 * If creating a new file, the parameter rrd must be initialised with
90 * details of the file content.
 * If opening an existing file, rrd must first be initialised with
 * rrd_init(rrd) prior to invoking rrd_open.
93 */
95 rrd_file_t *rrd_open(
96 const char *const file_name,
97 rrd_t *rrd,
98 unsigned rdwr)
99 {
100 unsigned long ui;
101 int flags = 0;
102 int version;
104 #ifdef HAVE_MMAP
105 ssize_t _page_size = sysconf(_SC_PAGESIZE);
106 char *data = MAP_FAILED;
107 #endif
108 off_t offset = 0;
109 struct stat statb;
110 rrd_file_t *rrd_file = NULL;
111 rrd_simple_file_t *rrd_simple_file = NULL;
112 size_t newfile_size = 0;
113 size_t header_len, value_cnt, data_len;
115 /* Are we creating a new file? */
116 if((rdwr & RRD_CREAT) && (rrd->stat_head != NULL))
117 {
118 header_len = rrd_get_header_size(rrd);
120 value_cnt = 0;
121 for (ui = 0; ui < rrd->stat_head->rra_cnt; ui++)
122 value_cnt += rrd->stat_head->ds_cnt * rrd->rra_def[ui].row_cnt;
124 data_len = sizeof(rrd_value_t) * value_cnt;
126 newfile_size = header_len + data_len;
127 }
129 rrd_file = (rrd_file_t*)malloc(sizeof(rrd_file_t));
130 if (rrd_file == NULL) {
131 rrd_set_error("allocating rrd_file descriptor for '%s'", file_name);
132 return NULL;
133 }
134 memset(rrd_file, 0, sizeof(rrd_file_t));
136 rrd_file->pvt = malloc(sizeof(rrd_simple_file_t));
137 if(rrd_file->pvt == NULL) {
138 rrd_set_error("allocating rrd_simple_file for '%s'", file_name);
139 return NULL;
140 }
141 memset(rrd_file->pvt, 0, sizeof(rrd_simple_file_t));
142 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
144 #ifdef DEBUG
145 if ((rdwr & (RRD_READONLY | RRD_READWRITE)) ==
146 (RRD_READONLY | RRD_READWRITE)) {
147 /* Both READONLY and READWRITE were given, which is invalid. */
148 rrd_set_error("in read/write request mask");
149 exit(-1);
150 }
151 #endif
153 #ifdef HAVE_MMAP
154 rrd_simple_file->mm_prot = PROT_READ;
155 rrd_simple_file->mm_flags = 0;
156 #endif
158 if (rdwr & RRD_READONLY) {
159 flags |= O_RDONLY;
160 #ifdef HAVE_MMAP
161 # if !defined(AIX)
162 rrd_simple_file->mm_flags = MAP_PRIVATE;
163 # endif
164 # ifdef MAP_NORESERVE
165 rrd_simple_file->mm_flags |= MAP_NORESERVE; /* readonly, so no swap backing needed */
166 # endif
167 #endif
168 } else {
169 if (rdwr & RRD_READWRITE) {
170 flags |= O_RDWR;
171 #ifdef HAVE_MMAP
172 rrd_simple_file->mm_flags = MAP_SHARED;
173 rrd_simple_file->mm_prot |= PROT_WRITE;
174 #endif
175 }
176 if (rdwr & RRD_CREAT) {
177 flags |= (O_CREAT | O_TRUNC);
178 }
179 if (rdwr & RRD_EXCL) {
180 flags |= O_EXCL;
181 }
182 }
183 if (rdwr & RRD_READAHEAD) {
184 #ifdef MAP_POPULATE
185 rrd_simple_file->mm_flags |= MAP_POPULATE; /* populate ptes and data */
186 #endif
187 #if defined MAP_NONBLOCK
188 rrd_simple_file->mm_flags |= MAP_NONBLOCK; /* just populate ptes */
189 #endif
190 }
191 #if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
192 flags |= O_BINARY;
193 #endif
195 if ((rrd_simple_file->fd = open(file_name, flags, 0666)) < 0) {
196 rrd_set_error("opening '%s': %s", file_name, rrd_strerror(errno));
197 goto out_free;
198 }
200 #ifdef HAVE_MMAP
201 #ifdef HAVE_BROKEN_MS_ASYNC
202 if (rdwr & RRD_READWRITE) {
203 /* some unices, the files mtime does not get update
204 on msync MS_ASYNC, in order to help them,
205 we update the the timestamp at this point.
206 The thing happens pretty 'close' to the open
207 call so the chances of a race should be minimal.
209 Maybe ask your vendor to fix your OS ... */
210 utime(file_name,NULL);
211 }
212 #endif
213 #endif
215 /* Better try to avoid seeks as much as possible. stat may be heavy but
216 * many concurrent seeks are even worse. */
217 if (newfile_size == 0 && ((fstat(rrd_simple_file->fd, &statb)) < 0)) {
218 rrd_set_error("fstat '%s': %s", file_name, rrd_strerror(errno));
219 goto out_close;
220 }
221 if (newfile_size == 0) {
222 rrd_file->file_len = statb.st_size;
223 } else {
224 rrd_file->file_len = newfile_size;
225 lseek(rrd_simple_file->fd, newfile_size - 1, SEEK_SET);
226 if ( write(rrd_simple_file->fd, "\0", 1) == -1){ /* poke */
227 rrd_set_error("write '%s': %s", file_name, rrd_strerror(errno));
228 goto out_close;
229 }
230 lseek(rrd_simple_file->fd, 0, SEEK_SET);
231 }
232 #ifdef HAVE_POSIX_FADVISE
233 /* In general we need no read-ahead when dealing with rrd_files.
234 When we stop reading, it is highly unlikely that we start up again.
235 In this manner we actually save time and diskaccess (and buffer cache).
236 Thanks to Dave Plonka for the Idea of using POSIX_FADV_RANDOM here. */
237 posix_fadvise(rrd_simple_file->fd, 0, 0, POSIX_FADV_RANDOM);
238 #endif
240 /*
241 if (rdwr & RRD_READWRITE)
242 {
243 if (setvbuf((rrd_simple_file->fd),NULL,_IONBF,2)) {
244 rrd_set_error("failed to disable the stream buffer\n");
245 return (-1);
246 }
247 }
248 */
250 #ifdef HAVE_MMAP
251 data = mmap(0, rrd_file->file_len,
252 rrd_simple_file->mm_prot, rrd_simple_file->mm_flags,
253 rrd_simple_file->fd, offset);
255 /* lets see if the first read worked */
256 if (data == MAP_FAILED) {
257 rrd_set_error("mmaping file '%s': %s", file_name,
258 rrd_strerror(errno));
259 goto out_close;
260 }
261 rrd_simple_file->file_start = data;
262 if (rdwr & RRD_CREAT) {
263 memset(data, DNAN, newfile_size - 1);
264 goto out_done;
265 }
266 #endif
267 if (rdwr & RRD_CREAT)
268 goto out_done;
269 #ifdef USE_MADVISE
270 if (rdwr & RRD_COPY) {
271 /* We will read everything in a moment (copying) */
272 madvise(data, rrd_file->file_len, MADV_WILLNEED );
273 madvise(data, rrd_file->file_len, MADV_SEQUENTIAL );
274 } else {
275 /* We do not need to read anything in for the moment */
276 madvise(data, rrd_file->file_len, MADV_RANDOM);
277 /* the stat_head will be needed soonish, so hint accordingly */
278 madvise(data, sizeof(stat_head_t), MADV_WILLNEED);
279 madvise(data, sizeof(stat_head_t), MADV_RANDOM);
280 }
281 #endif
283 __rrd_read(rrd->stat_head, stat_head_t,
284 1);
286 /* lets do some test if we are on track ... */
287 if (memcmp(rrd->stat_head->cookie, RRD_COOKIE, sizeof(RRD_COOKIE)) != 0) {
288 rrd_set_error("'%s' is not an RRD file", file_name);
289 goto out_nullify_head;
290 }
292 if (rrd->stat_head->float_cookie != FLOAT_COOKIE) {
293 rrd_set_error("This RRD was created on another architecture");
294 goto out_nullify_head;
295 }
297 version = atoi(rrd->stat_head->version);
299 if (version > atoi(RRD_VERSION)) {
300 rrd_set_error("can't handle RRD file version %s",
301 rrd->stat_head->version);
302 goto out_nullify_head;
303 }
304 #if defined USE_MADVISE
305 /* the ds_def will be needed soonish, so hint accordingly */
306 madvise(data + PAGE_START(offset),
307 sizeof(ds_def_t) * rrd->stat_head->ds_cnt, MADV_WILLNEED);
308 #endif
309 __rrd_read(rrd->ds_def, ds_def_t,
310 rrd->stat_head->ds_cnt);
312 #if defined USE_MADVISE
313 /* the rra_def will be needed soonish, so hint accordingly */
314 madvise(data + PAGE_START(offset),
315 sizeof(rra_def_t) * rrd->stat_head->rra_cnt, MADV_WILLNEED);
316 #endif
317 __rrd_read(rrd->rra_def, rra_def_t,
318 rrd->stat_head->rra_cnt);
320 /* handle different format for the live_head */
321 if (version < 3) {
322 rrd->live_head = (live_head_t *) malloc(sizeof(live_head_t));
323 if (rrd->live_head == NULL) {
324 rrd_set_error("live_head_t malloc");
325 goto out_close;
326 }
327 #if defined USE_MADVISE
328 /* the live_head will be needed soonish, so hint accordingly */
329 madvise(data + PAGE_START(offset), sizeof(time_t), MADV_WILLNEED);
330 #endif
331 __rrd_read(rrd->legacy_last_up, time_t,
332 1);
334 rrd->live_head->last_up = *rrd->legacy_last_up;
335 rrd->live_head->last_up_usec = 0;
336 } else {
337 #if defined USE_MADVISE
338 /* the live_head will be needed soonish, so hint accordingly */
339 madvise(data + PAGE_START(offset),
340 sizeof(live_head_t), MADV_WILLNEED);
341 #endif
342 __rrd_read(rrd->live_head, live_head_t,
343 1);
344 }
345 __rrd_read(rrd->pdp_prep, pdp_prep_t,
346 rrd->stat_head->ds_cnt);
347 __rrd_read(rrd->cdp_prep, cdp_prep_t,
348 rrd->stat_head->rra_cnt * rrd->stat_head->ds_cnt);
349 __rrd_read(rrd->rra_ptr, rra_ptr_t,
350 rrd->stat_head->rra_cnt);
352 rrd_file->header_len = offset;
353 rrd_file->pos = offset;
355 {
356 unsigned long row_cnt = 0;
358 for (ui=0; ui<rrd->stat_head->rra_cnt; ui++)
359 row_cnt += rrd->rra_def[ui].row_cnt;
361 size_t correct_len = rrd_file->header_len +
362 sizeof(rrd_value_t) * row_cnt * rrd->stat_head->ds_cnt;
364 if (correct_len > rrd_file->file_len)
365 {
366 rrd_set_error("'%s' is too small (should be %ld bytes)",
367 file_name, (long long) correct_len);
368 goto out_nullify_head;
369 }
370 }
372 out_done:
373 return (rrd_file);
374 out_nullify_head:
375 rrd->stat_head = NULL;
376 out_close:
377 #ifdef HAVE_MMAP
378 if (data != MAP_FAILED)
379 munmap(data, rrd_file->file_len);
380 #endif
382 close(rrd_simple_file->fd);
383 out_free:
384 free(rrd_file->pvt);
385 free(rrd_file);
386 return NULL;
387 }
#if defined DEBUG && DEBUG > 1
/* Print the list of in-core pages of the current rrd_file to stderr
 * (debug aid, compiled only with DEBUG > 1). 'mark' tags each line. */
static
void mincore_print(
    rrd_file_t *rrd_file,
    char *mark)
{
    rrd_simple_file_t *rrd_simple_file;
    rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
#ifdef HAVE_MMAP
    /* pretty print blocks in core */
    size_t    npages;
    size_t    off;
    unsigned char *vec;
    ssize_t   _page_size = sysconf(_SC_PAGESIZE);

    /* BUGFIX: mincore(2) wants exactly one status byte per page; the old
       code allocated file_len extra bytes and then scanned per byte. */
    npages = (rrd_file->file_len + _page_size - 1) / _page_size;
    vec = malloc(npages);
    if (vec != NULL) {
        memset(vec, 0, npages);
        if (mincore(rrd_simple_file->file_start, rrd_file->file_len,
                    vec) == 0) {
            size_t    prev;
            unsigned  is_in = 0, was_in = 0;

            /* walk page status bytes, printing each run of (not-)in-core */
            for (off = 0, prev = 0; off < npages; ++off) {
                is_in = vec[off] & 1;   /* lsb set => page is core resident */
                if (off == 0)
                    was_in = is_in;
                if (was_in != is_in) {
                    fprintf(stderr, "%s: %sin core: %p len %lu\n", mark,
                            was_in ? "" : "not ", (void *) (vec + prev),
                            (unsigned long) ((off - prev) * _page_size));
                    was_in = is_in;
                    prev = off;
                }
            }
            /* flush the final run */
            fprintf(stderr,
                    "%s: %sin core: %p len %lu\n", mark,
                    was_in ? "" : "not ", (void *) (vec + prev),
                    (unsigned long) ((off - prev) * _page_size));
        } else
            fprintf(stderr, "mincore: %s", rrd_strerror(errno));
        free(vec);      /* BUGFIX: vec leaked in the original */
    }
#else
    fprintf(stderr, "sorry mincore only works with mmap");
#endif
}
#endif                          /* defined DEBUG && DEBUG > 1 */
437 /*
438 * get exclusive lock to whole file.
439 * lock gets removed when we close the file
440 *
441 * returns 0 on success
442 */
443 int rrd_lock(
444 rrd_file_t *rrd_file)
445 {
446 int rcstat;
447 rrd_simple_file_t *rrd_simple_file;
448 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
450 {
451 #if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)
452 struct _stat st;
454 if (_fstat(rrd_simple_file->fd, &st) == 0) {
455 rcstat = _locking(rrd_simple_file->fd, _LK_NBLCK, st.st_size);
456 } else {
457 rcstat = -1;
458 }
459 #else
460 struct flock lock;
462 lock.l_type = F_WRLCK; /* exclusive write lock */
463 lock.l_len = 0; /* whole file */
464 lock.l_start = 0; /* start of file */
465 lock.l_whence = SEEK_SET; /* end of file */
467 rcstat = fcntl(rrd_simple_file->fd, F_SETLK, &lock);
468 #endif
469 }
471 return (rcstat);
472 }
475 /* drop cache except for the header and the active pages */
476 void rrd_dontneed(
477 rrd_file_t *rrd_file,
478 rrd_t *rrd)
479 {
480 rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
481 #if defined USE_MADVISE || defined HAVE_POSIX_FADVISE
482 size_t dontneed_start;
483 size_t rra_start;
484 size_t active_block;
485 size_t i;
486 ssize_t _page_size = sysconf(_SC_PAGESIZE);
488 if (rrd_file == NULL) {
489 #if defined DEBUG && DEBUG
490 fprintf (stderr, "rrd_dontneed: Argument 'rrd_file' is NULL.\n");
491 #endif
492 return;
493 }
495 #if defined DEBUG && DEBUG > 1
496 mincore_print(rrd_file, "before");
497 #endif
499 /* ignoring errors from RRDs that are smaller then the file_len+rounding */
500 rra_start = rrd_file->header_len;
501 dontneed_start = PAGE_START(rra_start) + _page_size;
502 for (i = 0; i < rrd->stat_head->rra_cnt; ++i) {
503 active_block =
504 PAGE_START(rra_start
505 + rrd->rra_ptr[i].cur_row
506 * rrd->stat_head->ds_cnt * sizeof(rrd_value_t));
507 if (active_block > dontneed_start) {
508 #ifdef USE_MADVISE
509 madvise(rrd_simple_file->file_start + dontneed_start,
510 active_block - dontneed_start - 1, MADV_DONTNEED);
511 #endif
512 /* in linux at least only fadvise DONTNEED seems to purge pages from cache */
513 #ifdef HAVE_POSIX_FADVISE
514 posix_fadvise(rrd_simple_file->fd, dontneed_start,
515 active_block - dontneed_start - 1,
516 POSIX_FADV_DONTNEED);
517 #endif
518 }
519 dontneed_start = active_block;
520 /* do not release 'hot' block if update for this RAA will occur
521 * within 10 minutes */
522 if (rrd->stat_head->pdp_step * rrd->rra_def[i].pdp_cnt -
523 rrd->live_head->last_up % (rrd->stat_head->pdp_step *
524 rrd->rra_def[i].pdp_cnt) < 10 * 60) {
525 dontneed_start += _page_size;
526 }
527 rra_start +=
528 rrd->rra_def[i].row_cnt * rrd->stat_head->ds_cnt *
529 sizeof(rrd_value_t);
530 }
532 if (dontneed_start < rrd_file->file_len) {
533 #ifdef USE_MADVISE
534 madvise(rrd_simple_file->file_start + dontneed_start,
535 rrd_file->file_len - dontneed_start, MADV_DONTNEED);
536 #endif
537 #ifdef HAVE_POSIX_FADVISE
538 posix_fadvise(rrd_simple_file->fd, dontneed_start,
539 rrd_file->file_len - dontneed_start,
540 POSIX_FADV_DONTNEED);
541 #endif
542 }
544 #if defined DEBUG && DEBUG > 1
545 mincore_print(rrd_file, "after");
546 #endif
547 #endif /* without madvise and posix_fadvise it does not make much sense todo anything */
548 }
554 int rrd_close(
555 rrd_file_t *rrd_file)
556 {
557 rrd_simple_file_t *rrd_simple_file;
558 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
559 int ret;
561 #ifdef HAVE_MMAP
562 ret = msync(rrd_simple_file->file_start, rrd_file->file_len, MS_ASYNC);
563 if (ret != 0)
564 rrd_set_error("msync rrd_file: %s", rrd_strerror(errno));
565 ret = munmap(rrd_simple_file->file_start, rrd_file->file_len);
566 if (ret != 0)
567 rrd_set_error("munmap rrd_file: %s", rrd_strerror(errno));
568 #endif
569 ret = close(rrd_simple_file->fd);
570 if (ret != 0)
571 rrd_set_error("closing file: %s", rrd_strerror(errno));
572 free(rrd_file->pvt);
573 free(rrd_file);
574 rrd_file = NULL;
575 return ret;
576 }
579 /* Set position of rrd_file. */
581 off_t rrd_seek(
582 rrd_file_t *rrd_file,
583 off_t off,
584 int whence)
585 {
586 off_t ret = 0;
587 rrd_simple_file_t *rrd_simple_file;
588 rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
590 #ifdef HAVE_MMAP
591 if (whence == SEEK_SET)
592 rrd_file->pos = off;
593 else if (whence == SEEK_CUR)
594 rrd_file->pos += off;
595 else if (whence == SEEK_END)
596 rrd_file->pos = rrd_file->file_len + off;
597 #else
598 ret = lseek(rrd_simple_file->fd, off, whence);
599 if (ret < 0)
600 rrd_set_error("lseek: %s", rrd_strerror(errno));
601 rrd_file->pos = ret;
602 #endif
603 /* mimic fseek, which returns 0 upon success */
604 return ret < 0; /*XXX: or just ret to mimic lseek */
605 }
608 /* Get current position in rrd_file. */
610 off_t rrd_tell(
611 rrd_file_t *rrd_file)
612 {
613 return rrd_file->pos;
614 }
617 /* Read count bytes into buffer buf, starting at rrd_file->pos.
618 * Returns the number of bytes read or <0 on error. */
620 ssize_t rrd_read(
621 rrd_file_t *rrd_file,
622 void *buf,
623 size_t count)
624 {
625 rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
626 #ifdef HAVE_MMAP
627 size_t _cnt = count;
628 ssize_t _surplus;
630 if (rrd_file->pos > rrd_file->file_len || _cnt == 0) /* EOF */
631 return 0;
632 if (buf == NULL)
633 return -1; /* EINVAL */
634 _surplus = rrd_file->pos + _cnt - rrd_file->file_len;
635 if (_surplus > 0) { /* short read */
636 _cnt -= _surplus;
637 }
638 if (_cnt == 0)
639 return 0; /* EOF */
640 buf = memcpy(buf, rrd_simple_file->file_start + rrd_file->pos, _cnt);
642 rrd_file->pos += _cnt; /* mimmic read() semantics */
643 return _cnt;
644 #else
645 ssize_t ret;
647 ret = read(rrd_simple_file->fd, buf, count);
648 if (ret > 0)
649 rrd_file->pos += ret; /* mimmic read() semantics */
650 return ret;
651 #endif
652 }
655 /* Write count bytes from buffer buf to the current position
656 * rrd_file->pos of rrd_simple_file->fd.
657 * Returns the number of bytes written or <0 on error. */
659 ssize_t rrd_write(
660 rrd_file_t *rrd_file,
661 const void *buf,
662 size_t count)
663 {
664 rrd_simple_file_t *rrd_simple_file = (rrd_simple_file_t *)rrd_file->pvt;
665 #ifdef HAVE_MMAP
666 size_t old_size = rrd_file->file_len;
667 if (count == 0)
668 return 0;
669 if (buf == NULL)
670 return -1; /* EINVAL */
672 if((rrd_file->pos + count) > old_size)
673 {
674 rrd_set_error("attempting to write beyond end of file (%ld + %ld > %ld)",rrd_file->pos, count, old_size);
675 return -1;
676 }
677 memcpy(rrd_simple_file->file_start + rrd_file->pos, buf, count);
678 rrd_file->pos += count;
679 return count; /* mimmic write() semantics */
680 #else
681 ssize_t _sz = write(rrd_simple_file->fd, buf, count);
683 if (_sz > 0)
684 rrd_file->pos += _sz;
685 return _sz;
686 #endif
687 }
/* Historical no-op kept for API compatibility: flushing now happens via
 * msync()/close() in rrd_close(), so this entry point does nothing. */
void rrd_flush(
    rrd_file_t UNUSED(*rrd_file))
{
}
697 /* Initialize RRD header. */
699 void rrd_init(
700 rrd_t *rrd)
701 {
702 rrd->stat_head = NULL;
703 rrd->ds_def = NULL;
704 rrd->rra_def = NULL;
705 rrd->live_head = NULL;
706 rrd->legacy_last_up = NULL;
707 rrd->rra_ptr = NULL;
708 rrd->pdp_prep = NULL;
709 rrd->cdp_prep = NULL;
710 rrd->rrd_value = NULL;
711 }
714 /* free RRD header data. */
716 #ifdef HAVE_MMAP
717 void rrd_free(
718 rrd_t *rrd)
719 {
720 if (rrd->legacy_last_up) { /* this gets set for version < 3 only */
721 free(rrd->live_head);
722 }
723 }
724 #else
725 void rrd_free(
726 rrd_t *rrd)
727 {
728 free(rrd->live_head);
729 free(rrd->stat_head);
730 free(rrd->ds_def);
731 free(rrd->rra_def);
732 free(rrd->rra_ptr);
733 free(rrd->pdp_prep);
734 free(rrd->cdp_prep);
735 free(rrd->rrd_value);
736 }
737 #endif
/* routine used by external libraries to free memory allocated by
 * rrd library; mem may be NULL (free(NULL) is a no-op) */
void rrd_freemem(
    void *mem)
{
    free(mem);
}
749 /*
750 * rra_update informs us about the RRAs being updated
751 * The low level storage API may use this information for
752 * aligning RRAs within stripes, or other performance enhancements
753 */
754 void rrd_notify_row(
755 rrd_file_t UNUSED(*rrd_file),
756 int UNUSED(rra_idx),
757 unsigned long UNUSED(rra_row),
758 time_t UNUSED(rra_time))
759 {
760 }
762 /*
763 * This function is called when creating a new RRD
764 * The storage implementation can use this opportunity to select
765 * a sensible starting row within the file.
766 * The default implementation is random, to ensure that all RRAs
767 * don't change to a new disk block at the same time
768 */
769 unsigned long rrd_select_initial_row(
770 rrd_file_t UNUSED(*rrd_file),
771 int UNUSED(rra_idx),
772 rra_def_t *rra
773 )
774 {
775 return rrd_random() % rra->row_cnt;
776 }