X-Git-Url: https://git.tokkee.org/?a=blobdiff_plain;f=cache-tree.c;h=39da54d1e56b5905655eafed1aff0f51c2540a8e;hb=0959df45253de579598dfc431ba5e370acd0ab8e;hp=bfc95d2dc9600083b73b61c06d2b32c983db8511;hpb=7a51ed66f653c248993b3c4a61932e47933d835e;p=git.git

diff --git a/cache-tree.c b/cache-tree.c
index bfc95d2dc..3d8f218a5 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -155,13 +155,17 @@ static int verify_cache(struct cache_entry **cache,
 	funny = 0;
 	for (i = 0; i < entries; i++) {
 		struct cache_entry *ce = cache[i];
-		if (ce_stage(ce)) {
+		if (ce_stage(ce) || (ce->ce_flags & CE_INTENT_TO_ADD)) {
 			if (10 < ++funny) {
 				fprintf(stderr, "...\n");
 				break;
 			}
-			fprintf(stderr, "%s: unmerged (%s)\n",
-				ce->name, sha1_to_hex(ce->sha1));
+			if (ce_stage(ce))
+				fprintf(stderr, "%s: unmerged (%s)\n",
+					ce->name, sha1_to_hex(ce->sha1));
+			else
+				fprintf(stderr, "%s: not added yet\n",
+					ce->name);
 		}
 	}
 	if (funny)
@@ -341,8 +345,11 @@ static int update_one(struct cache_tree *it,
 
 	if (dryrun)
 		hash_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1);
-	else
-		write_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1);
+	else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1)) {
+		strbuf_release(&buffer);
+		return -1;
+	}
+
 	strbuf_release(&buffer);
 	it->entry_count = i;
 #if DEBUG
@@ -504,7 +511,7 @@ struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
 	return read_one(&buffer, &size);
 }
 
-struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
+static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
 {
 	while (*path) {
 		const char *slash;
@@ -529,3 +536,58 @@ struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
 	}
 	return it;
 }
+
+int write_cache_as_tree(unsigned char *sha1, int missing_ok, const char *prefix)
+{
+	int entries, was_valid, newfd;
+
+	/*
+	 * We can't free this memory, it becomes part of a linked list
+	 * parsed atexit()
+	 */
+	struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+
+	newfd = hold_locked_index(lock_file, 1);
+
+	entries = read_cache();
+	if (entries < 0)
+		return WRITE_TREE_UNREADABLE_INDEX;
+
+	if (!active_cache_tree)
+		active_cache_tree = cache_tree();
+
+	was_valid = cache_tree_fully_valid(active_cache_tree);
+
+	if (!was_valid) {
+		if (cache_tree_update(active_cache_tree,
+				      active_cache, active_nr,
+				      missing_ok, 0) < 0)
+			return WRITE_TREE_UNMERGED_INDEX;
+		if (0 <= newfd) {
+			if (!write_cache(newfd, active_cache, active_nr) &&
+			    !commit_lock_file(lock_file))
+				newfd = -1;
+		}
+		/* Not being able to write is fine -- we are only interested
+		 * in updating the cache-tree part, and if the next caller
+		 * ends up using the old index with unupdated cache-tree part
+		 * it misses the work we did here, but that is just a
+		 * performance penalty and not a big deal.
+		 */
+	}
+
+	if (prefix) {
+		struct cache_tree *subtree =
+			cache_tree_find(active_cache_tree, prefix);
+		if (!subtree)
+			return WRITE_TREE_PREFIX_ERROR;
+		hashcpy(sha1, subtree->sha1);
+	}
+	else
+		hashcpy(sha1, active_cache_tree->sha1);
+
+	if (0 <= newfd)
+		rollback_lock_file(lock_file);
+
+	return 0;
+}
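
For context on how callers are expected to consume the new return values, here is
a minimal caller sketch, modeled on builtin-write-tree.c of the same era. It
assumes cache-tree.h declares write_cache_as_tree() together with the
WRITE_TREE_* codes used above; the "me" string and the die() messages are
illustrative, not taken from this diff.

/*
 * Hypothetical caller, sketched after builtin-write-tree.c of the
 * same vintage.  Assumes cache-tree.h declares write_cache_as_tree()
 * and the WRITE_TREE_* error codes seen in the diff above.
 */
#include "cache.h"
#include "cache-tree.h"

static const char *me = "git-write-tree";

static int write_tree(int missing_ok, const char *prefix)
{
	unsigned char sha1[20];

	switch (write_cache_as_tree(sha1, missing_ok, prefix)) {
	case 0:
		printf("%s\n", sha1_to_hex(sha1));	/* name of the tree object */
		return 0;
	case WRITE_TREE_UNREADABLE_INDEX:
		die("%s: error reading the index", me);
	case WRITE_TREE_UNMERGED_INDEX:
		die("%s: error building trees", me);
	case WRITE_TREE_PREFIX_ERROR:
		die("%s: prefix %s not found", me, prefix);
	default:
		die("%s: unexpected error", me);
	}
}

Note that the unmerged-index failure now also covers intent-to-add entries: the
verify_cache() hunk above rejects them with "not added yet", so no tree is
written that would silently drop a path the user has only promised to add.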
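
The lock handling at the tail of write_cache_as_tree() is an opportunistic
variant of git's usual locked-index update: writing the refreshed cache-tree
back is a bonus, and rollback_lock_file() releases the lock whenever the write
was skipped or failed. For comparison, a minimal sketch of the strict form of
that pattern, assuming this era's API (hold_locked_index() dies on failure when
its second argument is non-zero; write_cache() and commit_lock_file() return 0
on success):

/*
 * Strict locked-index update pattern, for comparison with the
 * relaxed variant above.  A sketch under the stated API assumptions,
 * not code from this diff.
 */
#include "cache.h"

static void rewrite_index(void)
{
	/* must not be freed: it is registered with the atexit() machinery */
	struct lock_file *lock = xcalloc(1, sizeof(*lock));
	int fd = hold_locked_index(lock, 1);	/* dies if it cannot lock */

	if (read_cache() < 0)
		die("unable to read the index");

	/* ... mutate the in-core index (active_cache) here ... */

	if (write_cache(fd, active_cache, active_nr) ||
	    commit_lock_file(lock))
		die("unable to write the index");
}

write_cache_as_tree() deliberately relaxes the failure path: as its comment
notes, losing the write only costs the next reader a cache-tree recomputation,
so it rolls the lock back instead of dying.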