X-Git-Url: https://git.tokkee.org/?a=blobdiff_plain;f=builtin-pack-objects.c;h=a15906bdb2021e68a014344cad4e73e9de3367ca;hb=e5f4e214636f9c9bd36c2897634108d5ad5587a1;hp=3d396ca37ac1356c7974aaaf34af39bcd0cf6bae;hpb=f26cacf4956b08238868b21a8eac6b5e51a17d7f;p=git.git diff --git a/builtin-pack-objects.c b/builtin-pack-objects.c index 3d396ca37..a15906bdb 100644 --- a/builtin-pack-objects.c +++ b/builtin-pack-objects.c @@ -15,19 +15,21 @@ #include "list-objects.h" #include "progress.h" +#ifdef THREADED_DELTA_SEARCH +#include +#endif + static const char pack_usage[] = "\ -git-pack-objects [{ -q | --progress | --all-progress }] [--max-pack-size=N] \n\ - [--local] [--incremental] [--window=N] [--depth=N] \n\ +git-pack-objects [{ -q | --progress | --all-progress }] \n\ + [--max-pack-size=N] [--local] [--incremental] \n\ + [--window=N] [--window-memory=N] [--depth=N] \n\ [--no-reuse-delta] [--no-reuse-object] [--delta-base-offset] \n\ - [--non-empty] [--revs [--unpacked | --all]*] [--reflog] \n\ + [--threads=N] [--non-empty] [--revs [--unpacked | --all]*] [--reflog] \n\ [--stdout | base-name] ["); } else { int fd = open_object_dir_tmp("tmp_pack_XXXXXX"); - if (fd < 0) - die("unable to create %s: %s\n", tmpname, strerror(errno)); pack_tmp_name = xstrdup(tmpname); f = sha1fd(fd, pack_tmp_name); } @@ -979,6 +983,8 @@ static void add_pbase_object(struct tree_desc *tree, int cmp; while (tree_entry(tree,&entry)) { + if (S_ISGITLINK(entry.mode)) + continue; cmp = tree_entry_len(entry.path, entry.sha1) != cmplen ? 1 : memcmp(name, entry.path, cmplen); if (cmp > 0) @@ -1270,11 +1276,11 @@ struct unpacked { struct object_entry *entry; void *data; struct delta_index *index; + unsigned depth; }; -static int delta_cacheable(struct unpacked *trg, struct unpacked *src, - unsigned long src_size, unsigned long trg_size, - unsigned long delta_size) +static int delta_cacheable(unsigned long src_size, unsigned long trg_size, + unsigned long delta_size) { if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size) return 0; @@ -1289,6 +1295,31 @@ static int delta_cacheable(struct unpacked *trg, struct unpacked *src, return 0; } +#ifdef THREADED_DELTA_SEARCH + +static pthread_mutex_t read_mutex = PTHREAD_MUTEX_INITIALIZER; +#define read_lock() pthread_mutex_lock(&read_mutex) +#define read_unlock() pthread_mutex_unlock(&read_mutex) + +static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER; +#define cache_lock() pthread_mutex_lock(&cache_mutex) +#define cache_unlock() pthread_mutex_unlock(&cache_mutex) + +static pthread_mutex_t progress_mutex = PTHREAD_MUTEX_INITIALIZER; +#define progress_lock() pthread_mutex_lock(&progress_mutex) +#define progress_unlock() pthread_mutex_unlock(&progress_mutex) + +#else + +#define read_lock() (void)0 +#define read_unlock() (void)0 +#define cache_lock() (void)0 +#define cache_unlock() (void)0 +#define progress_lock() (void)0 +#define progress_unlock() (void)0 + +#endif + /* * We search for deltas _backwards_ in a list sorted by type and * by size, so that we see progressively smaller and smaller files. @@ -1298,11 +1329,12 @@ static int delta_cacheable(struct unpacked *trg, struct unpacked *src, * one. 
*/ static int try_delta(struct unpacked *trg, struct unpacked *src, - unsigned max_depth) + unsigned max_depth, unsigned long *mem_usage) { struct object_entry *trg_entry = trg->entry; struct object_entry *src_entry = src->entry; unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz; + unsigned ref_depth; enum object_type type; void *delta_buf; @@ -1310,12 +1342,6 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, if (trg_entry->type != src_entry->type) return -1; - /* We do not compute delta to *create* objects we are not - * going to pack. - */ - if (trg_entry->preferred_base) - return -1; - /* * We do not bother to try a delta that we discarded * on an earlier try, but only when reusing delta data. @@ -1327,34 +1353,53 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, return 0; /* Let's not bust the allowed depth. */ - if (src_entry->depth >= max_depth) + if (src->depth >= max_depth) return 0; /* Now some size filtering heuristics. */ trg_size = trg_entry->size; - max_size = trg_size/2 - 20; - max_size = max_size * (max_depth - src_entry->depth) / max_depth; + if (!trg_entry->delta) { + max_size = trg_size/2 - 20; + ref_depth = 1; + } else { + max_size = trg_entry->delta_size; + ref_depth = trg->depth; + } + max_size = max_size * (max_depth - src->depth) / + (max_depth - ref_depth + 1); if (max_size == 0) return 0; - if (trg_entry->delta && trg_entry->delta_size <= max_size) - max_size = trg_entry->delta_size-1; src_size = src_entry->size; sizediff = src_size < trg_size ? trg_size - src_size : 0; if (sizediff >= max_size) return 0; + if (trg_size < src_size / 32) + return 0; /* Load data if not already done */ if (!trg->data) { + read_lock(); trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz); + read_unlock(); + if (!trg->data) + die("object %s cannot be read", + sha1_to_hex(trg_entry->idx.sha1)); if (sz != trg_size) die("object %s inconsistent object length (%lu vs %lu)", sha1_to_hex(trg_entry->idx.sha1), sz, trg_size); + *mem_usage += sz; } if (!src->data) { + read_lock(); src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz); + read_unlock(); + if (!src->data) + die("object %s cannot be read", + sha1_to_hex(src_entry->idx.sha1)); if (sz != src_size) die("object %s inconsistent object length (%lu vs %lu)", sha1_to_hex(src_entry->idx.sha1), sz, src_size); + *mem_usage += sz; } if (!src->index) { src->index = create_delta_index(src->data, src_size); @@ -1364,26 +1409,47 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, warning("suboptimal pack - out of memory"); return 0; } + *mem_usage += sizeof_delta_index(src->index); } delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size); if (!delta_buf) return 0; - if (trg_entry->delta_data) { - delta_cache_size -= trg_entry->delta_size; - free(trg_entry->delta_data); + if (trg_entry->delta) { + /* Prefer only shallower same-sized deltas. */ + if (delta_size == trg_entry->delta_size && + src->depth + 1 >= trg->depth) { + free(delta_buf); + return 0; + } } - trg_entry->delta_data = 0; + trg_entry->delta = src_entry; trg_entry->delta_size = delta_size; - trg_entry->depth = src_entry->depth + 1; + trg->depth = src->depth + 1; - if (delta_cacheable(src, trg, src_size, trg_size, delta_size)) { - trg_entry->delta_data = xrealloc(delta_buf, delta_size); + /* + * Handle memory allocation outside of the cache + * accounting lock. Compiler will optimize the strangeness + * away when THREADED_DELTA_SEARCH is not defined. 
+ */ + if (trg_entry->delta_data) + free(trg_entry->delta_data); + cache_lock(); + if (trg_entry->delta_data) { + delta_cache_size -= trg_entry->delta_size; + trg_entry->delta_data = NULL; + } + if (delta_cacheable(src_size, trg_size, delta_size)) { delta_cache_size += trg_entry->delta_size; - } else + cache_unlock(); + trg_entry->delta_data = xrealloc(delta_buf, delta_size); + } else { + cache_unlock(); free(delta_buf); + } + return 1; } @@ -1400,48 +1466,59 @@ static unsigned int check_delta_limit(struct object_entry *me, unsigned int n) return m; } -static void find_deltas(struct object_entry **list, int window, int depth) +static unsigned long free_unpacked(struct unpacked *n) +{ + unsigned long freed_mem = sizeof_delta_index(n->index); + free_delta_index(n->index); + n->index = NULL; + if (n->data) { + freed_mem += n->entry->size; + free(n->data); + n->data = NULL; + } + n->entry = NULL; + n->depth = 0; + return freed_mem; +} + +static void find_deltas(struct object_entry **list, unsigned list_size, + int window, int depth, unsigned *processed) { - uint32_t i = nr_objects, idx = 0, processed = 0; + uint32_t i = list_size, idx = 0, count = 0; unsigned int array_size = window * sizeof(struct unpacked); struct unpacked *array; - int max_depth; + unsigned long mem_usage = 0; - if (!nr_objects) - return; array = xmalloc(array_size); memset(array, 0, array_size); - if (progress) - start_progress(&progress_state, "Deltifying %u objects...", "", nr_result); do { struct object_entry *entry = list[--i]; struct unpacked *n = array + idx; - int j; - - if (!entry->preferred_base) - processed++; + int j, max_depth, best_base = -1; - if (progress) - display_progress(&progress_state, processed); - - if (entry->delta) - /* This happens if we decided to reuse existing - * delta from a pack. "!no_reuse_delta &&" is implied. - */ - continue; + mem_usage -= free_unpacked(n); + n->entry = entry; - if (entry->size < 50) - continue; + while (window_memory_limit && + mem_usage > window_memory_limit && + count > 1) { + uint32_t tail = (idx + window - count) % window; + mem_usage -= free_unpacked(array + tail); + count--; + } - if (entry->no_try_delta) - continue; + /* We do not compute delta to *create* objects we are not + * going to pack. + */ + if (entry->preferred_base) + goto next; - free_delta_index(n->index); - n->index = NULL; - free(n->data); - n->data = NULL; - n->entry = entry; + progress_lock(); + (*processed)++; + if (progress) + display_progress(&progress_state, *processed); + progress_unlock(); /* * If the current object is at pack edge, take the depth the @@ -1457,6 +1534,7 @@ static void find_deltas(struct object_entry **list, int window, int depth) j = window; while (--j > 0) { + int ret; uint32_t other_idx = idx + j; struct unpacked *m; if (other_idx >= window) @@ -1464,26 +1542,45 @@ static void find_deltas(struct object_entry **list, int window, int depth) m = array + other_idx; if (!m->entry) break; - if (try_delta(n, m, max_depth) < 0) + ret = try_delta(n, m, max_depth, &mem_usage); + if (ret < 0) break; + else if (ret > 0) + best_base = other_idx; } /* if we made n a delta, and if n is already at max * depth, leaving it in the window is pointless. we * should evict it first. */ - if (entry->delta && depth <= entry->depth) + if (entry->delta && depth <= n->depth) continue; + /* + * Move the best delta base up in the window, after the + * currently deltified object, to keep it longer. It will + * be the first base object to be attempted next. 
+ */ + if (entry->delta) { + struct unpacked swap = array[best_base]; + int dist = (window + idx - best_base) % window; + int dst = best_base; + while (dist--) { + int src = (dst + 1) % window; + array[dst] = array[src]; + dst = src; + } + array[dst] = swap; + } + next: idx++; + if (count + 1 < window) + count++; if (idx >= window) idx = 0; } while (i > 0); - if (progress) - stop_progress(&progress_state); - for (i = 0; i < window; ++i) { free_delta_index(array[i].index); free(array[i].data); @@ -1491,21 +1588,145 @@ static void find_deltas(struct object_entry **list, int window, int depth) free(array); } +#ifdef THREADED_DELTA_SEARCH + +struct thread_params { + pthread_t thread; + struct object_entry **list; + unsigned list_size; + int window; + int depth; + unsigned *processed; +}; + +static pthread_mutex_t data_request = PTHREAD_MUTEX_INITIALIZER; +static pthread_mutex_t data_ready = PTHREAD_MUTEX_INITIALIZER; +static pthread_mutex_t data_provider = PTHREAD_MUTEX_INITIALIZER; +static struct thread_params *data_requester; + +static void *threaded_find_deltas(void *arg) +{ + struct thread_params *me = arg; + + for (;;) { + pthread_mutex_lock(&data_request); + data_requester = me; + pthread_mutex_unlock(&data_provider); + pthread_mutex_lock(&data_ready); + pthread_mutex_unlock(&data_request); + + if (!me->list_size) + return NULL; + + find_deltas(me->list, me->list_size, + me->window, me->depth, me->processed); + } +} + +static void ll_find_deltas(struct object_entry **list, unsigned list_size, + int window, int depth, unsigned *processed) +{ + struct thread_params *target, p[delta_search_threads]; + int i, ret; + unsigned chunk_size; + + if (delta_search_threads <= 1) { + find_deltas(list, list_size, window, depth, processed); + return; + } + + pthread_mutex_lock(&data_provider); + pthread_mutex_lock(&data_ready); + + for (i = 0; i < delta_search_threads; i++) { + p[i].window = window; + p[i].depth = depth; + p[i].processed = processed; + ret = pthread_create(&p[i].thread, NULL, + threaded_find_deltas, &p[i]); + if (ret) + die("unable to create thread: %s", strerror(ret)); + } + + /* this should be auto-tuned somehow */ + chunk_size = window * 1000; + + do { + unsigned sublist_size = chunk_size; + if (sublist_size > list_size) + sublist_size = list_size; + + /* try to split chunks on "path" boundaries */ + while (sublist_size < list_size && list[sublist_size]->hash && + list[sublist_size]->hash == list[sublist_size-1]->hash) + sublist_size++; + + pthread_mutex_lock(&data_provider); + target = data_requester; + target->list = list; + target->list_size = sublist_size; + pthread_mutex_unlock(&data_ready); + + list += sublist_size; + list_size -= sublist_size; + if (!sublist_size) { + pthread_join(target->thread, NULL); + i--; + } + } while (i); +} + +#else +#define ll_find_deltas find_deltas +#endif + static void prepare_pack(int window, int depth) { struct object_entry **delta_list; - uint32_t i; + uint32_t i, n, nr_deltas; get_object_details(); - if (!window || !depth) + if (!nr_objects || !window || !depth) return; delta_list = xmalloc(nr_objects * sizeof(*delta_list)); - for (i = 0; i < nr_objects; i++) - delta_list[i] = objects + i; - qsort(delta_list, nr_objects, sizeof(*delta_list), type_size_sort); - find_deltas(delta_list, window+1, depth); + nr_deltas = n = 0; + + for (i = 0; i < nr_objects; i++) { + struct object_entry *entry = objects + i; + + if (entry->delta) + /* This happens if we decided to reuse existing + * delta from a pack. "!no_reuse_delta &&" is implied. 
+ */ + continue; + + if (entry->size < 50) + continue; + + if (entry->no_try_delta) + continue; + + if (!entry->preferred_base) + nr_deltas++; + + delta_list[n++] = entry; + } + + if (nr_deltas) { + unsigned nr_done = 0; + if (progress) + start_progress(&progress_state, + "Deltifying %u objects...", "", + nr_deltas); + qsort(delta_list, n, sizeof(*delta_list), type_size_sort); + ll_find_deltas(delta_list, n, window+1, depth, &nr_done); + if (progress) + stop_progress(&progress_state); + if (nr_done != nr_deltas) + die("inconsistency with delta count"); + } free(delta_list); } @@ -1515,7 +1736,11 @@ static int git_pack_config(const char *k, const char *v) window = git_config_int(k, v); return 0; } - if(!strcmp(k, "pack.depth")) { + if (!strcmp(k, "pack.windowmemory")) { + window_memory_limit = git_config_ulong(k, v); + return 0; + } + if (!strcmp(k, "pack.depth")) { depth = git_config_int(k, v); return 0; } @@ -1537,6 +1762,17 @@ static int git_pack_config(const char *k, const char *v) cache_max_small_delta_size = git_config_int(k, v); return 0; } + if (!strcmp(k, "pack.threads")) { + delta_search_threads = git_config_int(k, v); + if (delta_search_threads < 1) + die("invalid number of threads specified (%d)", + delta_search_threads); +#ifndef THREADED_DELTA_SEARCH + if (delta_search_threads > 1) + warning("no threads support, ignoring %s", k); +#endif + return 0; + } return git_default_config(k, v); } @@ -1691,6 +1927,23 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix) usage(pack_usage); continue; } + if (!prefixcmp(arg, "--window-memory=")) { + if (!git_parse_ulong(arg+16, &window_memory_limit)) + usage(pack_usage); + continue; + } + if (!prefixcmp(arg, "--threads=")) { + char *end; + delta_search_threads = strtoul(arg+10, &end, 0); + if (!arg[10] || *end || delta_search_threads < 1) + usage(pack_usage); +#ifndef THREADED_DELTA_SEARCH + if (delta_search_threads > 1) + warning("no threads support, " + "ignoring %s", arg); +#endif + continue; + } if (!prefixcmp(arg, "--depth=")) { char *end; depth = strtoul(arg+8, &end, 0);
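
The hunks above introduce THREADED_DELTA_SEARCH together with read_lock/read_unlock, cache_lock/cache_unlock and progress_lock/progress_unlock macros that expand to pthread_mutex calls when threading is compiled in and to (void)0 otherwise; the include whose header name was lost to HTML escaping near the top of the diff is <pthread.h>, as the pthread_* calls throughout the patch imply. A minimal sketch of that compile-time pattern, using a hypothetical shared counter rather than the real pack-objects state:

#include <stdio.h>

#ifdef THREADED_DELTA_SEARCH
#include <pthread.h>
static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
#define counter_lock()   pthread_mutex_lock(&counter_mutex)
#define counter_unlock() pthread_mutex_unlock(&counter_mutex)
#else
/* Single-threaded build: the locking compiles away to a no-op. */
#define counter_lock()   (void)0
#define counter_unlock() (void)0
#endif

static unsigned long counter;

/* Hypothetical helper: bump a counter that may be shared between threads. */
static void bump_counter(void)
{
	counter_lock();
	counter++;
	counter_unlock();
}

int main(void)
{
	bump_counter();
	printf("counter = %lu\n", counter);
	return 0;
}

The same source then builds either way: with -DTHREADED_DELTA_SEARCH -lpthread the increment is serialized, without it no pthread symbol is referenced at all.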
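
try_delta() now caps the acceptable delta size differently depending on whether the target already has a delta: a fresh target must beat trg_size/2 - 20 at reference depth 1, while an already-deltified target must beat its current delta_size at its current depth, and the cap shrinks as the candidate base sits deeper in its own chain. A standalone sketch of that heuristic; delta_size_cap() and its parameter names are illustrative, not helpers from the patch:

#include <stdio.h>

/*
 * Sketch of the size cap computed in try_delta() above: a new delta is
 * only worth creating if it comes out smaller than this bound.  The
 * caller is assumed to have filtered out tiny objects already (the
 * patch skips entries under 50 bytes in prepare_pack()).
 */
static unsigned long delta_size_cap(unsigned long trg_size,
				    unsigned long cur_delta_size, /* 0 if none */
				    unsigned cur_depth,
				    unsigned src_depth,
				    unsigned max_depth)
{
	unsigned long max_size;
	unsigned ref_depth;

	if (!cur_delta_size) {
		/* No delta yet: beat half the object size minus header slack. */
		max_size = trg_size / 2 - 20;
		ref_depth = 1;
	} else {
		/* Already deltified: only a smaller delta is interesting. */
		max_size = cur_delta_size;
		ref_depth = cur_depth;
	}
	/* Deeper bases get a tighter cap, so long chains must pay for themselves. */
	return max_size * (max_depth - src_depth) / (max_depth - ref_depth + 1);
}

int main(void)
{
	/* Example: 10000-byte blob with no delta yet, base at depth 3 of 10. */
	printf("cap = %lu\n", delta_size_cap(10000, 0, 0, 3, 10));
	return 0;
}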
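
find_deltas() now tracks mem_usage and, when --window-memory (or pack.windowmemory) sets a limit, evicts entries from the tail of the circular delta window until usage drops back under the limit, always keeping at least one other candidate around. A simplified sketch of that eviction; struct slot, free_slot() and enforce_limit() stand in for struct unpacked, free_unpacked() and the inline loop in the patch:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Simplified stand-in for struct unpacked: only the memory bookkeeping. */
struct slot {
	void *data;
	unsigned long size;
};

static unsigned long free_slot(struct slot *s)
{
	unsigned long freed = s->size;
	free(s->data);
	memset(s, 0, sizeof(*s));
	return freed;
}

/*
 * idx is where the next object goes, count is how many slots are live,
 * so the oldest live slot sits at (idx + window - count) % window.
 */
static void enforce_limit(struct slot *array, unsigned window,
			  unsigned idx, unsigned *count,
			  unsigned long *mem_usage, unsigned long limit)
{
	while (limit && *mem_usage > limit && *count > 1) {
		unsigned tail = (idx + window - *count) % window;
		*mem_usage -= free_slot(array + tail);
		(*count)--;
	}
}

int main(void)
{
	struct slot win[4] = { { malloc(100), 100 }, { malloc(200), 200 } };
	unsigned count = 2;
	unsigned long mem = 300;

	/* A 250-byte limit forces the 100-byte slot at the tail out first. */
	enforce_limit(win, 4, 2, &count, &mem, 250);
	printf("live slots: %u, memory: %lu\n", count, mem);
	free(win[0].data);
	free(win[1].data);
	return 0;
}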
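
After a successful delta, the best base that was found is rotated up to the slot the current object occupied, so that once idx advances it is the first candidate tried for the next object. A sketch of that ring rotation on a plain int array; promote_base() is illustrative, the loop body mirrors the patch:

#include <stdio.h>

/*
 * Shift array[best_base+1 .. idx] down by one slot (modulo window) and
 * drop the old best_base entry into position idx; after idx advances,
 * that entry is the first base attempted for the next object.
 */
static void promote_base(int *array, int window, int idx, int best_base)
{
	int swap = array[best_base];
	int dist = (window + idx - best_base) % window;
	int dst = best_base;

	while (dist--) {
		int src = (dst + 1) % window;
		array[dst] = array[src];
		dst = src;
	}
	array[dst] = swap;
}

int main(void)
{
	int win[5] = { 10, 11, 12, 13, 14 };
	int i;

	/* Current object sits at idx 4; its best base was found at idx 1. */
	promote_base(win, 5, 4, 1);
	for (i = 0; i < 5; i++)
		printf("%d ", win[i]);   /* prints: 10 12 13 14 11 */
	printf("\n");
	return 0;
}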
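
ll_find_deltas() hands each worker thread a sublist of roughly window * 1000 entries, but stretches the boundary while consecutive entries share the same non-zero name hash, so objects sorted together by path are never split across two threads. A sketch of just that splitting rule; struct entry and chunk_end() are stand-ins for the real object_entry list walk:

#include <stdio.h>

/* Minimal stand-in: only the name-hash field matters for chunking. */
struct entry {
	unsigned hash;
};

/*
 * Grow the sublist past the nominal chunk size until the name hash
 * changes (or runs out, hash 0), so a run of same-path objects stays
 * on one thread.
 */
static unsigned chunk_end(struct entry **list, unsigned list_size,
			  unsigned chunk_size)
{
	unsigned n = chunk_size < list_size ? chunk_size : list_size;

	while (n < list_size && list[n]->hash &&
	       list[n]->hash == list[n - 1]->hash)
		n++;
	return n;
}

int main(void)
{
	struct entry e[6] = { {1}, {1}, {2}, {2}, {2}, {3} };
	struct entry *list[6];
	unsigned i;

	for (i = 0; i < 6; i++)
		list[i] = &e[i];

	/* A nominal chunk of 3 is stretched to 5 to keep the hash-2 run together. */
	printf("chunk ends at %u\n", chunk_end(list, 6, 3));
	return 0;
}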