X-Git-Url: https://git.tokkee.org/?a=blobdiff_plain;f=cache-tree.c;h=39da54d1e56b5905655eafed1aff0f51c2540a8e;hb=79418599e7221ef10641cf9cc07018c25b4d0310;hp=350a79b768e7c032e1b4815f7201c9ce5f88bc8d;hpb=a77a33a51df9b7655d80299487ec6fbb10445496;p=git.git

diff --git a/cache-tree.c b/cache-tree.c
index 350a79b76..39da54d1e 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -235,8 +235,7 @@ static int update_one(struct cache_tree *it,
 		      int missing_ok,
 		      int dryrun)
 {
-	unsigned long size, offset;
-	char *buffer;
+	struct strbuf buffer;
 	int i;
 
 	if (0 <= it->entry_count && has_sha1_file(it->sha1))
@@ -293,9 +292,7 @@ static int update_one(struct cache_tree *it,
 	/*
 	 * Then write out the tree object for this level.
 	 */
-	size = 8192;
-	buffer = xmalloc(size);
-	offset = 0;
+	strbuf_init(&buffer, 8192);
 
 	for (i = 0; i < entries; i++) {
 		struct cache_entry *ce = cache[i];
@@ -323,24 +320,18 @@ static int update_one(struct cache_tree *it,
 		}
 		else {
 			sha1 = ce->sha1;
-			mode = ntohl(ce->ce_mode);
+			mode = ce->ce_mode;
 			entlen = pathlen - baselen;
 		}
 		if (mode != S_IFGITLINK && !missing_ok && !has_sha1_file(sha1))
 			return error("invalid object %s", sha1_to_hex(sha1));
 
-		if (!ce->ce_mode)
+		if (ce->ce_flags & CE_REMOVE)
 			continue; /* entry being removed */
 
-		if (size < offset + entlen + 100) {
-			size = alloc_nr(offset + entlen + 100);
-			buffer = xrealloc(buffer, size);
-		}
-		offset += sprintf(buffer + offset,
-				  "%o %.*s", mode, entlen, path + baselen);
-		buffer[offset++] = 0;
-		hashcpy((unsigned char*)buffer + offset, sha1);
-		offset += 20;
+		strbuf_grow(&buffer, entlen + 100);
+		strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
+		strbuf_add(&buffer, sha1, 20);
 
 #if DEBUG
 		fprintf(stderr, "cache-tree update-one %o %.*s\n",
@@ -349,10 +340,10 @@ static int update_one(struct cache_tree *it,
 	}
 
 	if (dryrun)
-		hash_sha1_file(buffer, offset, tree_type, it->sha1);
+		hash_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1);
 	else
-		write_sha1_file(buffer, offset, tree_type, it->sha1);
-	free(buffer);
+		write_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1);
+	strbuf_release(&buffer);
 	it->entry_count = i;
 #if DEBUG
 	fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
@@ -378,12 +369,8 @@ int cache_tree_update(struct cache_tree *it,
 	return 0;
 }
 
-static void *write_one(struct cache_tree *it,
-                       char *path,
-                       int pathlen,
-                       char *buffer,
-                       unsigned long *size,
-                       unsigned long *offset)
+static void write_one(struct strbuf *buffer, struct cache_tree *it,
+                      const char *path, int pathlen)
 {
 	int i;
 
@@ -393,13 +380,9 @@ static void *write_one(struct cache_tree *it,
 	 * tree-sha1 (missing if invalid)
 	 * subtree_nr "cache-tree" entries for subtrees.
 	 */
-	if (*size < *offset + pathlen + 100) {
-		*size = alloc_nr(*offset + pathlen + 100);
-		buffer = xrealloc(buffer, *size);
-	}
-	*offset += sprintf(buffer + *offset, "%.*s%c%d %d\n",
-			   pathlen, path, 0,
-			   it->entry_count, it->subtree_nr);
+	strbuf_grow(buffer, pathlen + 100);
+	strbuf_add(buffer, path, pathlen);
+	strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);
 
 #if DEBUG
 	if (0 <= it->entry_count)
@@ -412,8 +395,7 @@ static void *write_one(struct cache_tree *it,
 #endif
 
 	if (0 <= it->entry_count) {
-		hashcpy((unsigned char*)buffer + *offset, it->sha1);
-		*offset += 20;
+		strbuf_add(buffer, it->sha1, 20);
 	}
 	for (i = 0; i < it->subtree_nr; i++) {
 		struct cache_tree_sub *down = it->down[i];
@@ -423,21 +405,13 @@ static void *write_one(struct cache_tree *it,
 					     prev->name, prev->namelen) <= 0)
 				die("fatal - unsorted cache subtree");
 		}
-		buffer = write_one(down->cache_tree, down->name, down->namelen,
-				   buffer, size, offset);
+		write_one(buffer, down->cache_tree, down->name, down->namelen);
 	}
-	return buffer;
 }
 
-void *cache_tree_write(struct cache_tree *root, unsigned long *size_p)
+void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
 {
-	char path[PATH_MAX];
-	unsigned long size = 8192;
-	char *buffer = xmalloc(size);
-
-	*size_p = 0;
-	path[0] = 0;
-	return write_one(root, path, 0, buffer, &size, size_p);
+	write_one(sb, root, "", 0);
 }
 
 static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
@@ -478,7 +452,7 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
 	if (0 <= it->entry_count) {
 		if (size < 20)
 			goto free_return;
-		hashcpy(it->sha1, (unsigned char*)buf);
+		hashcpy(it->sha1, (const unsigned char*)buf);
 		buf += 20;
 		size -= 20;
 	}
@@ -555,3 +529,58 @@ struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
 	}
 	return it;
 }
+
+int write_cache_as_tree(unsigned char *sha1, int missing_ok, const char *prefix)
+{
+	int entries, was_valid, newfd;
+
+	/*
+	 * We can't free this memory, it becomes part of a linked list
+	 * parsed atexit()
+	 */
+	struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+
+	newfd = hold_locked_index(lock_file, 1);
+
+	entries = read_cache();
+	if (entries < 0)
+		return WRITE_TREE_UNREADABLE_INDEX;
+
+	if (!active_cache_tree)
+		active_cache_tree = cache_tree();
+
+	was_valid = cache_tree_fully_valid(active_cache_tree);
+
+	if (!was_valid) {
+		if (cache_tree_update(active_cache_tree,
+				      active_cache, active_nr,
+				      missing_ok, 0) < 0)
+			return WRITE_TREE_UNMERGED_INDEX;
+		if (0 <= newfd) {
+			if (!write_cache(newfd, active_cache, active_nr) &&
+			    !commit_lock_file(lock_file))
+				newfd = -1;
+		}
+		/* Not being able to write is fine -- we are only interested
+		 * in updating the cache-tree part, and if the next caller
+		 * ends up using the old index with unupdated cache-tree part
+		 * it misses the work we did here, but that is just a
+		 * performance penalty and not a big deal.
+		 */
+	}
+
+	if (prefix) {
+		struct cache_tree *subtree =
+			cache_tree_find(active_cache_tree, prefix);
+		if (!subtree)
+			return WRITE_TREE_PREFIX_ERROR;
+		hashcpy(sha1, subtree->sha1);
+	}
+	else
+		hashcpy(sha1, active_cache_tree->sha1);
+
+	if (0 <= newfd)
+		rollback_lock_file(lock_file);
+
+	return 0;
+}
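
Note: the recurring change in this patch is mechanical -- every manual size/offset/xrealloc buffer is replaced by git's strbuf API (strbuf_init, strbuf_grow, strbuf_addf, strbuf_add, strbuf_release), which tracks length and capacity itself and exposes the result as buffer.buf / buffer.len. Below is a minimal sketch of that call sequence, assuming it is compiled inside the git source tree against the strbuf.h of this era; the helper append_tree_entry and the demo() wrapper are hypothetical names used only to illustrate the pattern, not part of the patch.

	#include "cache.h"	/* git-internal header; pulls in strbuf.h */

	/* Hypothetical helper: append one "<mode> <name>\0<sha1>" tree entry,
	 * the same sequence update_one() performs with a strbuf above. */
	static void append_tree_entry(struct strbuf *buf, unsigned mode,
				      const char *name, int namelen,
				      const unsigned char *sha1)
	{
		strbuf_grow(buf, namelen + 100);	/* optional pre-sizing hint */
		strbuf_addf(buf, "%o %.*s%c", mode, namelen, name, '\0');
		strbuf_add(buf, sha1, 20);		/* raw 20-byte SHA-1 */
	}

	static void demo(void)
	{
		struct strbuf buffer;
		unsigned char sha1[20] = { 0 };

		strbuf_init(&buffer, 8192);		/* was: xmalloc + offset = 0 */
		append_tree_entry(&buffer, 0100644, "file.c", 6, sha1);
		/* buffer.buf / buffer.len replace the old buffer / offset pair */
		strbuf_release(&buffer);		/* was: free(buffer) */
	}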