
Merge branch 'master' into db/fetch-pack
author    Shawn O. Pearce <spearce@spearce.org>
          Tue, 16 Oct 2007 04:15:25 +0000 (00:15 -0400)
committer Shawn O. Pearce <spearce@spearce.org>
          Tue, 16 Oct 2007 04:15:25 +0000 (00:15 -0400)
There are a number of tricky conflicts between master and
this topic right now due to the rewrite of builtin-push.
Junio must have handled these via rerere; I'd rather not
deal with them again, so I'm pre-merging master into the
topic.  Besides, this topic somehow started to depend on
the strbuf series that was in next but is now in master;
it no longer compiles on its own without the strbuf API.

* master: (184 commits)
  Whip post 1.5.3.4 maintenance series into shape.
  Minor usage update in setgitperms.perl
  manual: use 'URL' instead of 'url'.
  manual: add some markup.
  manual: Fix example finding commits referencing given content.
  Fix wording in push definition.
  Fix some typos, punctuation, missing words, minor markup.
  manual: Fix or remove em dashes.
  Add a --dry-run option to git-push.
  Add a --dry-run option to git-send-pack.
  Fix in-place editing functions in convert.c
  instaweb: support for Ruby's WEBrick server
  instaweb: allow for use of auto-generated scripts
  Add 'git-p4 commit' as an alias for 'git-p4 submit'
  hg-to-git speedup through selectable repack intervals
  git-svn: respect Subversion's [auth] section configuration values
  gtksourceview2 support for gitview
  fix contrib/hooks/post-receive-email hooks.recipients error message
  Support cvs via git-shell
  rebase -i: use diff plumbing instead of porcelain
  ...

Conflicts:

Makefile
builtin-push.c
rsh.c

14 files changed:
Documentation/config.txt
Makefile
builtin-push.c
builtin.h
cache.h
connect.c
contrib/examples/git-fetch.sh
git.c
http-push.c
refs.c
send-pack.c
transport.c
transport.h
walker.c

diff --cc Documentation/config.txt
Simple merge
diff --cc Makefile
index 7dd212a19e314181f6d91aed8e3fa87c2eebfba7,d74ac93c116fe24d1a23df1c7c21d9ca0cc1c23f..bb4873d75431f5b86ab2ce807d7f7d6a54c71faa
+++ b/Makefile
@@@ -232,8 -233,8 +232,8 @@@ SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH
  
  # ... and all the rest that could be moved out of bindir to gitexecdir
  PROGRAMS = \
-       git-convert-objects$X git-fetch-pack$X \
+       git-fetch-pack$X \
 -      git-hash-object$X git-index-pack$X git-local-fetch$X \
 +      git-hash-object$X git-index-pack$X \
        git-fast-import$X \
        git-daemon$X \
        git-merge-index$X git-mktag$X git-mktree$X git-patch-id$X \
diff --cc builtin-push.c
index 4ee36c292d9d42f658673c5d37d80077d65dc855,141380b852771e107ee5ae1df3f8b9e30cfcea6b..a552f0dac654bb532ecd43ec5c8cd15e113ef82c
@@@ -6,11 -6,10 +6,11 @@@
  #include "run-command.h"
  #include "builtin.h"
  #include "remote.h"
 +#include "transport.h"
  
- static const char push_usage[] = "git-push [--all] [--tags] [--receive-pack=<git-receive-pack>] [--repo=all] [-f | --force] [-v] [<repository> <refspec>...]";
+ static const char push_usage[] = "git-push [--all] [--dry-run] [--tags] [--receive-pack=<git-receive-pack>] [--repo=all] [-f | --force] [-v] [<repository> <refspec>...]";
  
 -static int all, dry_run, force, thin, verbose;
 +static int all, thin, verbose;
  static const char *receivepack;
  
  static const char **refspec;
@@@ -104,9 -146,13 +104,13 @@@ int cmd_push(int argc, const char **arg
                        continue;
                }
                if (!strcmp(arg, "--all")) {
 -                      all = 1;
 +                      flags |= TRANSPORT_PUSH_ALL;
                        continue;
                }
 -                      dry_run = 1;
+               if (!strcmp(arg, "--dry-run")) {
++                      flags |= TRANSPORT_PUSH_DRY_RUN;
+                       continue;
+               }
                if (!strcmp(arg, "--tags")) {
                        add_refspec("refs/tags/*");
                        continue;
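
As an aside for readers following the hunk above: the merged builtin-push.c (only partially shown here) no longer keeps its own dry_run variable and instead folds the option into the transport flags.  The following is a hedged sketch, not the real file; do_push_sketch() is a hypothetical name, and the snippet only compiles inside a git tree of this era that provides transport.h.

/*
 * Hypothetical illustration only -- mirrors the flag handling visible
 * in the builtin-push.c hunk above, not the complete command.
 */
#include "cache.h"
#include "remote.h"
#include "transport.h"

static int do_push_sketch(struct remote *remote, const char *url,
                          int refspec_nr, const char **refspec, int dry_run)
{
        struct transport *transport = transport_get(remote, url);
        int flags = 0;
        int err;

        if (dry_run)
                flags |= TRANSPORT_PUSH_DRY_RUN; /* the new bit defined in transport.h */

        /* transport_push() dispatches to the git, http or rsync push backend */
        err = transport_push(transport, refspec_nr, refspec, flags);
        transport_disconnect(transport);
        return err;
}
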
diff --cc builtin.h
Simple merge
diff --cc cache.h
Simple merge
diff --cc connect.c
Simple merge
diff --cc contrib/examples/git-fetch.sh
index c3a200120df636fe8db0f02902d92c351c6c5e2e,0000000000000000000000000000000000000000..e44af2c86d8e7e44bc79aafcc8ccef3806804720
mode 100755,000000..100755
--- /dev/null
@@@ -1,377 -1,0 +1,377 @@@
- while case "$#" in 0) break ;; esac
 +#!/bin/sh
 +#
 +
 +USAGE='<fetch-options> <repository> <refspec>...'
 +SUBDIRECTORY_OK=Yes
 +. git-sh-setup
 +set_reflog_action "fetch $*"
 +cd_to_toplevel ;# probably unnecessary...
 +
 +. git-parse-remote
 +_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
 +_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40"
 +
 +LF='
 +'
 +IFS="$LF"
 +
 +no_tags=
 +tags=
 +append=
 +force=
 +verbose=
 +update_head_ok=
 +exec=
 +keep=
 +shallow_depth=
 +no_progress=
 +test -t 1 || no_progress=--no-progress
 +quiet=
++while test $# != 0
 +do
 +      case "$1" in
 +      -a|--a|--ap|--app|--appe|--appen|--append)
 +              append=t
 +              ;;
 +      --upl|--uplo|--uploa|--upload|--upload-|--upload-p|\
 +      --upload-pa|--upload-pac|--upload-pack)
 +              shift
 +              exec="--upload-pack=$1"
 +              ;;
 +      --upl=*|--uplo=*|--uploa=*|--upload=*|\
 +      --upload-=*|--upload-p=*|--upload-pa=*|--upload-pac=*|--upload-pack=*)
 +              exec=--upload-pack=$(expr "z$1" : 'z-[^=]*=\(.*\)')
 +              shift
 +              ;;
 +      -f|--f|--fo|--for|--forc|--force)
 +              force=t
 +              ;;
 +      -t|--t|--ta|--tag|--tags)
 +              tags=t
 +              ;;
 +      -n|--n|--no|--no-|--no-t|--no-ta|--no-tag|--no-tags)
 +              no_tags=t
 +              ;;
 +      -u|--u|--up|--upd|--upda|--updat|--update|--update-|--update-h|\
 +      --update-he|--update-hea|--update-head|--update-head-|\
 +      --update-head-o|--update-head-ok)
 +              update_head_ok=t
 +              ;;
 +      -q|--q|--qu|--qui|--quie|--quiet)
 +              quiet=--quiet
 +              ;;
 +      -v|--verbose)
 +              verbose="$verbose"Yes
 +              ;;
 +      -k|--k|--ke|--kee|--keep)
 +              keep='-k -k'
 +              ;;
 +      --depth=*)
 +              shallow_depth="--depth=`expr "z$1" : 'z-[^=]*=\(.*\)'`"
 +              ;;
 +      --depth)
 +              shift
 +              shallow_depth="--depth=$1"
 +              ;;
 +      -*)
 +              usage
 +              ;;
 +      *)
 +              break
 +              ;;
 +      esac
 +      shift
 +done
 +
 +case "$#" in
 +0)
 +      origin=$(get_default_remote)
 +      test -n "$(get_remote_url ${origin})" ||
 +              die "Where do you want to fetch from today?"
 +      set x $origin ; shift ;;
 +esac
 +
 +if test -z "$exec"
 +then
 +      # No command line override and we have configuration for the remote.
 +      exec="--upload-pack=$(get_uploadpack $1)"
 +fi
 +
 +remote_nick="$1"
 +remote=$(get_remote_url "$@")
 +refs=
 +rref=
 +rsync_slurped_objects=
 +
 +if test "" = "$append"
 +then
 +      : >"$GIT_DIR/FETCH_HEAD"
 +fi
 +
 +# Global that is reused later
 +ls_remote_result=$(git ls-remote $exec "$remote") ||
 +      die "Cannot get the repository state from $remote"
 +
 +append_fetch_head () {
 +      flags=
 +      test -n "$verbose" && flags="$flags$LF-v"
 +      test -n "$force$single_force" && flags="$flags$LF-f"
 +      GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
 +              git fetch--tool $flags append-fetch-head "$@"
 +}
 +
 +# updating the current HEAD with git-fetch in a bare
 +# repository is always fine.
 +if test -z "$update_head_ok" && test $(is_bare_repository) = false
 +then
 +      orig_head=$(git rev-parse --verify HEAD 2>/dev/null)
 +fi
 +
 +# Allow --notags from remote.$1.tagopt
 +case "$tags$no_tags" in
 +'')
 +      case "$(git config --get "remote.$1.tagopt")" in
 +      --no-tags)
 +              no_tags=t ;;
 +      esac
 +esac
 +
 +# If --tags (and later --heads or --all) is specified, then we are
 +# not talking about defaults stored in Pull: line of remotes or
 +# branches file, and just fetch those and refspecs explicitly given.
 +# Otherwise we do what we always did.
 +
 +reflist=$(get_remote_refs_for_fetch "$@")
 +if test "$tags"
 +then
 +      taglist=`IFS='  ' &&
 +                echo "$ls_remote_result" |
 +                git show-ref --exclude-existing=refs/tags/ |
 +                while read sha1 name
 +                do
 +                      echo ".${name}:${name}"
 +                done` || exit
 +      if test "$#" -gt 1
 +      then
 +              # remote URL plus explicit refspecs; we need to merge them.
 +              reflist="$reflist$LF$taglist"
 +      else
 +              # No explicit refspecs; fetch tags only.
 +              reflist=$taglist
 +      fi
 +fi
 +
 +fetch_all_at_once () {
 +
 +  eval=$(echo "$1" | git fetch--tool parse-reflist "-")
 +  eval "$eval"
 +
 +    ( : subshell because we muck with IFS
 +      IFS="   $LF"
 +      (
 +      if test "$remote" = . ; then
 +          git show-ref $rref || echo failed "$remote"
 +      elif test -f "$remote" ; then
 +          test -n "$shallow_depth" &&
 +              die "shallow clone with bundle is not supported"
 +          git bundle unbundle "$remote" $rref ||
 +          echo failed "$remote"
 +      else
 +              if      test -d "$remote" &&
 +
 +                      # The remote might be our alternate.  With
 +                      # this optimization we will bypass fetch-pack
 +                      # altogether, which means we cannot be doing
 +                      # the shallow stuff at all.
 +                      test ! -f "$GIT_DIR/shallow" &&
 +                      test -z "$shallow_depth" &&
 +
 +                      # See if all of what we are going to fetch are
 +                      # connected to our repository's tips, in which
 +                      # case we do not have to do any fetch.
 +                      theirs=$(echo "$ls_remote_result" | \
 +                              git fetch--tool -s pick-rref "$rref" "-") &&
 +
 +                      # This will barf when $theirs reach an object that
 +                      # we do not have in our repository.  Otherwise,
 +                      # we already have everything the fetch would bring in.
 +                      git rev-list --objects $theirs --not --all \
 +                              >/dev/null 2>/dev/null
 +              then
 +                      echo "$ls_remote_result" | \
 +                              git fetch--tool pick-rref "$rref" "-"
 +              else
 +                      flags=
 +                      case $verbose in
 +                      YesYes*)
 +                          flags="-v"
 +                          ;;
 +                      esac
 +                      git-fetch-pack --thin $exec $keep $shallow_depth \
 +                              $quiet $no_progress $flags "$remote" $rref ||
 +                      echo failed "$remote"
 +              fi
 +      fi
 +      ) |
 +      (
 +      flags=
 +      test -n "$verbose" && flags="$flags -v"
 +      test -n "$force" && flags="$flags -f"
 +      GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
 +              git fetch--tool $flags native-store \
 +                      "$remote" "$remote_nick" "$refs"
 +      )
 +    ) || exit
 +
 +}
 +
 +fetch_per_ref () {
 +  reflist="$1"
 +  refs=
 +  rref=
 +
 +  for ref in $reflist
 +  do
 +      refs="$refs$LF$ref"
 +
 +      # These are relative path from $GIT_DIR, typically starting at refs/
 +      # but may be HEAD
 +      if expr "z$ref" : 'z\.' >/dev/null
 +      then
 +        not_for_merge=t
 +        ref=$(expr "z$ref" : 'z\.\(.*\)')
 +      else
 +        not_for_merge=
 +      fi
 +      if expr "z$ref" : 'z+' >/dev/null
 +      then
 +        single_force=t
 +        ref=$(expr "z$ref" : 'z+\(.*\)')
 +      else
 +        single_force=
 +      fi
 +      remote_name=$(expr "z$ref" : 'z\([^:]*\):')
 +      local_name=$(expr "z$ref" : 'z[^:]*:\(.*\)')
 +
 +      rref="$rref$LF$remote_name"
 +
 +      # There are transports that can fetch only one head at a time...
 +      case "$remote" in
 +      http://* | https://* | ftp://*)
 +        test -n "$shallow_depth" &&
 +              die "shallow clone with http not supported"
 +        proto=`expr "$remote" : '\([^:]*\):'`
 +        if [ -n "$GIT_SSL_NO_VERIFY" ]; then
 +            curl_extra_args="-k"
 +        fi
 +        if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
 +              "`git config --bool http.noEPSV`" = true ]; then
 +            noepsv_opt="--disable-epsv"
 +        fi
 +
 +        # Find $remote_name from ls-remote output.
 +        head=$(echo "$ls_remote_result" | \
 +              git fetch--tool -s pick-rref "$remote_name" "-")
 +        expr "z$head" : "z$_x40\$" >/dev/null ||
 +              die "No such ref $remote_name at $remote"
 +        echo >&2 "Fetching $remote_name from $remote using $proto"
 +        case "$quiet" in '') v=-v ;; *) v= ;; esac
 +        git-http-fetch $v -a "$head" "$remote" || exit
 +        ;;
 +      rsync://*)
 +        test -n "$shallow_depth" &&
 +              die "shallow clone with rsync not supported"
 +        TMP_HEAD="$GIT_DIR/TMP_HEAD"
 +        rsync -L -q "$remote/$remote_name" "$TMP_HEAD" || exit 1
 +        head=$(git rev-parse --verify TMP_HEAD)
 +        rm -f "$TMP_HEAD"
 +        case "$quiet" in '') v=-v ;; *) v= ;; esac
 +        test "$rsync_slurped_objects" || {
 +            rsync -a $v --ignore-existing --exclude info \
 +                "$remote/objects/" "$GIT_OBJECT_DIRECTORY/" || exit
 +
 +            # Look at objects/info/alternates for rsync -- http will
 +            # support it natively and git native ones will do it on
 +            # the remote end.  Not having that file is not a crime.
 +            rsync -q "$remote/objects/info/alternates" \
 +                "$GIT_DIR/TMP_ALT" 2>/dev/null ||
 +                rm -f "$GIT_DIR/TMP_ALT"
 +            if test -f "$GIT_DIR/TMP_ALT"
 +            then
 +                resolve_alternates "$remote" <"$GIT_DIR/TMP_ALT" |
 +                while read alt
 +                do
 +                    case "$alt" in 'bad alternate: '*) die "$alt";; esac
 +                    echo >&2 "Getting alternate: $alt"
 +                    rsync -av --ignore-existing --exclude info \
 +                    "$alt" "$GIT_OBJECT_DIRECTORY/" || exit
 +                done
 +                rm -f "$GIT_DIR/TMP_ALT"
 +            fi
 +            rsync_slurped_objects=t
 +        }
 +        ;;
 +      esac
 +
 +      append_fetch_head "$head" "$remote" \
 +        "$remote_name" "$remote_nick" "$local_name" "$not_for_merge" || exit
 +
 +  done
 +
 +}
 +
 +fetch_main () {
 +      case "$remote" in
 +      http://* | https://* | ftp://* | rsync://* )
 +              fetch_per_ref "$@"
 +              ;;
 +      *)
 +              fetch_all_at_once "$@"
 +              ;;
 +      esac
 +}
 +
 +fetch_main "$reflist" || exit
 +
 +# automated tag following
 +case "$no_tags$tags" in
 +'')
 +      case "$reflist" in
 +      *:refs/*)
 +              # effective only when we are following remote branch
 +              # using local tracking branch.
 +              taglist=$(IFS=' ' &&
 +              echo "$ls_remote_result" |
 +              git show-ref --exclude-existing=refs/tags/ |
 +              while read sha1 name
 +              do
 +                      git cat-file -t "$sha1" >/dev/null 2>&1 || continue
 +                      echo >&2 "Auto-following $name"
 +                      echo ".${name}:${name}"
 +              done)
 +      esac
 +      case "$taglist" in
 +      '') ;;
 +      ?*)
 +              # do not deepen a shallow tree when following tags
 +              shallow_depth=
 +              fetch_main "$taglist" || exit ;;
 +      esac
 +esac
 +
 +# If the original head was empty (i.e. no "master" yet), or
 +# if we were told not to worry, we do not have to check.
 +case "$orig_head" in
 +'')
 +      ;;
 +?*)
 +      curr_head=$(git rev-parse --verify HEAD 2>/dev/null)
 +      if test "$curr_head" != "$orig_head"
 +      then
 +          git update-ref \
 +                      -m "$GIT_REFLOG_ACTION: Undoing incorrectly fetched HEAD." \
 +                      HEAD "$orig_head"
 +              die "Cannot fetch into the current branch."
 +      fi
 +      ;;
 +esac
diff --cc git.c
Simple merge
diff --cc http-push.c
Simple merge
diff --cc refs.c
Simple merge
diff --cc send-pack.c
Simple merge
diff --cc transport.c
index 46da754078f3c679e62124fb7e7468e44d85dafc,0000000000000000000000000000000000000000..e363c11c052bcd47401068c5caad0e34f72d87b8
mode 100644,000000..100644
--- /dev/null
@@@ -1,819 -1,0 +1,821 @@@
 +#include "cache.h"
 +#include "transport.h"
 +#include "run-command.h"
 +#ifndef NO_CURL
 +#include "http.h"
 +#endif
 +#include "pkt-line.h"
 +#include "fetch-pack.h"
 +#include "walker.h"
 +#include "bundle.h"
 +#include "dir.h"
 +#include "refs.h"
 +
 +/* rsync support */
 +
 +/*
 + * We copy packed-refs and refs/ into a temporary file, then read the
 + * loose refs recursively (sorting whenever possible), and then inserting
 + * those packed refs that are not yet in the list (not validating, but
 + * assuming that the file is sorted).
 + *
 + * Appears refactoring this from refs.c is too cumbersome.
 + */
 +
 +static int str_cmp(const void *a, const void *b)
 +{
 +      const char *s1 = a;
 +      const char *s2 = b;
 +
 +      return strcmp(s1, s2);
 +}
 +
 +/* path->buf + name_offset is expected to point to "refs/" */
 +
 +static int read_loose_refs(struct strbuf *path, int name_offset,
 +              struct ref **tail)
 +{
 +      DIR *dir = opendir(path->buf);
 +      struct dirent *de;
 +      struct {
 +              char **entries;
 +              int nr, alloc;
 +      } list;
 +      int i, pathlen;
 +
 +      if (!dir)
 +              return -1;
 +
 +      memset (&list, 0, sizeof(list));
 +
 +      while ((de = readdir(dir))) {
 +              if (de->d_name[0] == '.' && (de->d_name[1] == '\0' ||
 +                              (de->d_name[1] == '.' &&
 +                               de->d_name[2] == '\0')))
 +                      continue;
 +              ALLOC_GROW(list.entries, list.nr + 1, list.alloc);
 +              list.entries[list.nr++] = xstrdup(de->d_name);
 +      }
 +      closedir(dir);
 +
 +      /* sort the list */
 +
 +      qsort(list.entries, list.nr, sizeof(char *), str_cmp);
 +
 +      pathlen = path->len;
 +      strbuf_addch(path, '/');
 +
 +      for (i = 0; i < list.nr; i++, strbuf_setlen(path, pathlen + 1)) {
 +              strbuf_addstr(path, list.entries[i]);
 +              if (read_loose_refs(path, name_offset, tail)) {
 +                      int fd = open(path->buf, O_RDONLY);
 +                      char buffer[40];
 +                      struct ref *next;
 +
 +                      if (fd < 0)
 +                              continue;
 +                      next = alloc_ref(path->len - name_offset + 1);
 +                      if (read_in_full(fd, buffer, 40) != 40 ||
 +                                      get_sha1_hex(buffer, next->old_sha1)) {
 +                              close(fd);
 +                              free(next);
 +                              continue;
 +                      }
 +                      close(fd);
 +                      strcpy(next->name, path->buf + name_offset);
 +                      (*tail)->next = next;
 +                      *tail = next;
 +              }
 +      }
 +      strbuf_setlen(path, pathlen);
 +
 +      for (i = 0; i < list.nr; i++)
 +              free(list.entries[i]);
 +      free(list.entries);
 +
 +      return 0;
 +}
 +
 +/* insert the packed refs for which no loose refs were found */
 +
 +static void insert_packed_refs(const char *packed_refs, struct ref **list)
 +{
 +      FILE *f = fopen(packed_refs, "r");
 +      static char buffer[PATH_MAX];
 +
 +      if (!f)
 +              return;
 +
 +      for (;;) {
 +              int cmp, len;
 +
 +              if (!fgets(buffer, sizeof(buffer), f)) {
 +                      fclose(f);
 +                      return;
 +              }
 +
 +              if (hexval(buffer[0]) > 0xf)
 +                      continue;
 +              len = strlen(buffer);
 +              if (buffer[len - 1] == '\n')
 +                      buffer[--len] = '\0';
 +              if (len < 41)
 +                      continue;
 +              while ((*list)->next &&
 +                              (cmp = strcmp(buffer + 41,
 +                                    (*list)->next->name)) > 0)
 +                      list = &(*list)->next;
 +              if (!(*list)->next || cmp < 0) {
 +                      struct ref *next = alloc_ref(len - 40);
 +                      buffer[40] = '\0';
 +                      if (get_sha1_hex(buffer, next->old_sha1)) {
 +                              warning ("invalid SHA-1: %s", buffer);
 +                              free(next);
 +                              continue;
 +                      }
 +                      strcpy(next->name, buffer + 41);
 +                      next->next = (*list)->next;
 +                      (*list)->next = next;
 +                      list = &(*list)->next;
 +              }
 +      }
 +}
 +
 +static struct ref *get_refs_via_rsync(const struct transport *transport)
 +{
 +      struct strbuf buf = STRBUF_INIT, temp_dir = STRBUF_INIT;
 +      struct ref dummy, *tail = &dummy;
 +      struct child_process rsync;
 +      const char *args[5];
 +      int temp_dir_len;
 +
 +      /* copy the refs to the temporary directory */
 +
 +      strbuf_addstr(&temp_dir, git_path("rsync-refs-XXXXXX"));
 +      if (!mkdtemp(temp_dir.buf))
 +              die ("Could not make temporary directory");
 +      temp_dir_len = temp_dir.len;
 +
 +      strbuf_addstr(&buf, transport->url);
 +      strbuf_addstr(&buf, "/refs");
 +
 +      memset(&rsync, 0, sizeof(rsync));
 +      rsync.argv = args;
 +      rsync.stdout_to_stderr = 1;
 +      args[0] = "rsync";
 +      args[1] = (transport->verbose > 0) ? "-rv" : "-r";
 +      args[2] = buf.buf;
 +      args[3] = temp_dir.buf;
 +      args[4] = NULL;
 +
 +      if (run_command(&rsync))
 +              die ("Could not run rsync to get refs");
 +
 +      strbuf_reset(&buf);
 +      strbuf_addstr(&buf, transport->url);
 +      strbuf_addstr(&buf, "/packed-refs");
 +
 +      args[2] = buf.buf;
 +
 +      if (run_command(&rsync))
 +              die ("Could not run rsync to get refs");
 +
 +      /* read the copied refs */
 +
 +      strbuf_addstr(&temp_dir, "/refs");
 +      read_loose_refs(&temp_dir, temp_dir_len + 1, &tail);
 +      strbuf_setlen(&temp_dir, temp_dir_len);
 +
 +      tail = &dummy;
 +      strbuf_addstr(&temp_dir, "/packed-refs");
 +      insert_packed_refs(temp_dir.buf, &tail);
 +      strbuf_setlen(&temp_dir, temp_dir_len);
 +
 +      if (remove_dir_recursively(&temp_dir, 0))
 +              warning ("Error removing temporary directory %s.",
 +                              temp_dir.buf);
 +
 +      strbuf_release(&buf);
 +      strbuf_release(&temp_dir);
 +
 +      return dummy.next;
 +}
 +
 +static int fetch_objs_via_rsync(struct transport *transport,
 +                               int nr_objs, struct ref **to_fetch)
 +{
 +      struct strbuf buf = STRBUF_INIT;
 +      struct child_process rsync;
 +      const char *args[8];
 +      int result;
 +
 +      strbuf_addstr(&buf, transport->url);
 +      strbuf_addstr(&buf, "/objects/");
 +
 +      memset(&rsync, 0, sizeof(rsync));
 +      rsync.argv = args;
 +      rsync.stdout_to_stderr = 1;
 +      args[0] = "rsync";
 +      args[1] = (transport->verbose > 0) ? "-rv" : "-r";
 +      args[2] = "--ignore-existing";
 +      args[3] = "--exclude";
 +      args[4] = "info";
 +      args[5] = buf.buf;
 +      args[6] = get_object_directory();
 +      args[7] = NULL;
 +
 +      /* NEEDSWORK: handle one level of alternates */
 +      result = run_command(&rsync);
 +
 +      strbuf_release(&buf);
 +
 +      return result;
 +}
 +
 +static int write_one_ref(const char *name, const unsigned char *sha1,
 +              int flags, void *data)
 +{
 +      struct strbuf *buf = data;
 +      int len = buf->len;
 +      FILE *f;
 +
 +      /* when called via for_each_ref(), flags is non-zero */
 +      if (flags && prefixcmp(name, "refs/heads/") &&
 +                      prefixcmp(name, "refs/tags/"))
 +              return 0;
 +
 +      strbuf_addstr(buf, name);
 +      if (safe_create_leading_directories(buf->buf) ||
 +                      !(f = fopen(buf->buf, "w")) ||
 +                      fprintf(f, "%s\n", sha1_to_hex(sha1)) < 0 ||
 +                      fclose(f))
 +              return error("problems writing temporary file %s", buf->buf);
 +      strbuf_setlen(buf, len);
 +      return 0;
 +}
 +
 +static int write_refs_to_temp_dir(struct strbuf *temp_dir,
 +              int refspec_nr, const char **refspec)
 +{
 +      int i;
 +
 +      for (i = 0; i < refspec_nr; i++) {
 +              unsigned char sha1[20];
 +              char *ref;
 +
 +              if (dwim_ref(refspec[i], strlen(refspec[i]), sha1, &ref) != 1)
 +                      return error("Could not get ref %s", refspec[i]);
 +
 +              if (write_one_ref(ref, sha1, 0, temp_dir)) {
 +                      free(ref);
 +                      return -1;
 +              }
 +              free(ref);
 +      }
 +      return 0;
 +}
 +
 +static int rsync_transport_push(struct transport *transport,
 +              int refspec_nr, const char **refspec, int flags)
 +{
 +      struct strbuf buf = STRBUF_INIT, temp_dir = STRBUF_INIT;
 +      int result = 0, i;
 +      struct child_process rsync;
 +      const char *args[8];
 +
 +      /* first push the objects */
 +
 +      strbuf_addstr(&buf, transport->url);
 +      strbuf_addch(&buf, '/');
 +
 +      memset(&rsync, 0, sizeof(rsync));
 +      rsync.argv = args;
 +      rsync.stdout_to_stderr = 1;
 +      args[0] = "rsync";
 +      args[1] = (transport->verbose > 0) ? "-av" : "-a";
 +      args[2] = "--ignore-existing";
 +      args[3] = "--exclude";
 +      args[4] = "info";
 +      args[5] = get_object_directory();;
 +      args[6] = buf.buf;
 +      args[7] = NULL;
 +
 +      if (run_command(&rsync))
 +              return error("Could not push objects to %s", transport->url);
 +
 +      /* copy the refs to the temporary directory; they could be packed. */
 +
 +      strbuf_addstr(&temp_dir, git_path("rsync-refs-XXXXXX"));
 +      if (!mkdtemp(temp_dir.buf))
 +              die ("Could not make temporary directory");
 +      strbuf_addch(&temp_dir, '/');
 +
 +      if (flags & TRANSPORT_PUSH_ALL) {
 +              if (for_each_ref(write_one_ref, &temp_dir))
 +                      return -1;
 +      } else if (write_refs_to_temp_dir(&temp_dir, refspec_nr, refspec))
 +              return -1;
 +
 +      i = (flags & TRANSPORT_PUSH_FORCE) ? 2 : 3;
 +      args[i++] = temp_dir.buf;
 +      args[i++] = transport->url;
 +      args[i++] = NULL;
 +      if (run_command(&rsync))
 +              result = error("Could not push to %s", transport->url);
 +
 +      if (remove_dir_recursively(&temp_dir, 0))
 +              warning ("Could not remove temporary directory %s.",
 +                              temp_dir.buf);
 +
 +      strbuf_release(&buf);
 +      strbuf_release(&temp_dir);
 +
 +      return result;
 +}
 +
 +/* Generic functions for using commit walkers */
 +
 +static int fetch_objs_via_walker(struct transport *transport,
 +                               int nr_objs, struct ref **to_fetch)
 +{
 +      char *dest = xstrdup(transport->url);
 +      struct walker *walker = transport->data;
 +      char **objs = xmalloc(nr_objs * sizeof(*objs));
 +      int i;
 +
 +      walker->get_all = 1;
 +      walker->get_tree = 1;
 +      walker->get_history = 1;
 +      walker->get_verbosely = transport->verbose >= 0;
 +      walker->get_recover = 0;
 +
 +      for (i = 0; i < nr_objs; i++)
 +              objs[i] = xstrdup(sha1_to_hex(to_fetch[i]->old_sha1));
 +
 +      if (walker_fetch(walker, nr_objs, objs, NULL, NULL))
 +              die("Fetch failed.");
 +
 +      for (i = 0; i < nr_objs; i++)
 +              free(objs[i]);
 +      free(objs);
 +      free(dest);
 +      return 0;
 +}
 +
 +static int disconnect_walker(struct transport *transport)
 +{
 +      struct walker *walker = transport->data;
 +      if (walker)
 +              walker_free(walker);
 +      return 0;
 +}
 +
 +#ifndef NO_CURL
 +static int curl_transport_push(struct transport *transport, int refspec_nr, const char **refspec, int flags) {
 +      const char **argv;
 +      int argc;
 +      int err;
 +
 +      argv = xmalloc((refspec_nr + 11) * sizeof(char *));
 +      argv[0] = "http-push";
 +      argc = 1;
 +      if (flags & TRANSPORT_PUSH_ALL)
 +              argv[argc++] = "--all";
 +      if (flags & TRANSPORT_PUSH_FORCE)
 +              argv[argc++] = "--force";
 +      argv[argc++] = transport->url;
 +      while (refspec_nr--)
 +              argv[argc++] = *refspec++;
 +      argv[argc] = NULL;
 +      err = run_command_v_opt(argv, RUN_GIT_CMD);
 +      switch (err) {
 +      case -ERR_RUN_COMMAND_FORK:
 +              error("unable to fork for %s", argv[0]);
 +      case -ERR_RUN_COMMAND_EXEC:
 +              error("unable to exec %s", argv[0]);
 +              break;
 +      case -ERR_RUN_COMMAND_WAITPID:
 +      case -ERR_RUN_COMMAND_WAITPID_WRONG_PID:
 +      case -ERR_RUN_COMMAND_WAITPID_SIGNAL:
 +      case -ERR_RUN_COMMAND_WAITPID_NOEXIT:
 +              error("%s died with strange error", argv[0]);
 +      }
 +      return !!err;
 +}
 +
 +static int missing__target(int code, int result)
 +{
 +      return  /* file:// URL -- do we ever use one??? */
 +              (result == CURLE_FILE_COULDNT_READ_FILE) ||
 +              /* http:// and https:// URL */
 +              (code == 404 && result == CURLE_HTTP_RETURNED_ERROR) ||
 +              /* ftp:// URL */
 +              (code == 550 && result == CURLE_FTP_COULDNT_RETR_FILE)
 +              ;
 +}
 +
 +#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
 +
 +static struct ref *get_refs_via_curl(const struct transport *transport)
 +{
 +      struct buffer buffer;
 +      char *data, *start, *mid;
 +      char *ref_name;
 +      char *refs_url;
 +      int i = 0;
 +
 +      struct active_request_slot *slot;
 +      struct slot_results results;
 +
 +      struct ref *refs = NULL;
 +      struct ref *ref = NULL;
 +      struct ref *last_ref = NULL;
 +
 +      data = xmalloc(4096);
 +      buffer.size = 4096;
 +      buffer.posn = 0;
 +      buffer.buffer = data;
 +
 +      refs_url = xmalloc(strlen(transport->url) + 11);
 +      sprintf(refs_url, "%s/info/refs", transport->url);
 +
 +      http_init();
 +
 +      slot = get_active_slot();
 +      slot->results = &results;
 +      curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
 +      curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
 +      curl_easy_setopt(slot->curl, CURLOPT_URL, refs_url);
 +      curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
 +      if (start_active_slot(slot)) {
 +              run_active_slot(slot);
 +              if (results.curl_result != CURLE_OK) {
 +                      if (missing_target(&results)) {
 +                              free(buffer.buffer);
 +                              return NULL;
 +                      } else {
 +                              free(buffer.buffer);
 +                              error("%s", curl_errorstr);
 +                              return NULL;
 +                      }
 +              }
 +      } else {
 +              free(buffer.buffer);
 +              error("Unable to start request");
 +              return NULL;
 +      }
 +
 +      http_cleanup();
 +
 +      data = buffer.buffer;
 +      start = NULL;
 +      mid = data;
 +      while (i < buffer.posn) {
 +              if (!start)
 +                      start = &data[i];
 +              if (data[i] == '\t')
 +                      mid = &data[i];
 +              if (data[i] == '\n') {
 +                      data[i] = 0;
 +                      ref_name = mid + 1;
 +                      ref = xmalloc(sizeof(struct ref) +
 +                                    strlen(ref_name) + 1);
 +                      memset(ref, 0, sizeof(struct ref));
 +                      strcpy(ref->name, ref_name);
 +                      get_sha1_hex(start, ref->old_sha1);
 +                      if (!refs)
 +                              refs = ref;
 +                      if (last_ref)
 +                              last_ref->next = ref;
 +                      last_ref = ref;
 +                      start = NULL;
 +              }
 +              i++;
 +      }
 +
 +      free(buffer.buffer);
 +
 +      return refs;
 +}
 +
 +static int fetch_objs_via_curl(struct transport *transport,
 +                               int nr_objs, struct ref **to_fetch)
 +{
 +      if (!transport->data)
 +              transport->data = get_http_walker(transport->url);
 +      return fetch_objs_via_walker(transport, nr_objs, to_fetch);
 +}
 +
 +#endif
 +
 +struct bundle_transport_data {
 +      int fd;
 +      struct bundle_header header;
 +};
 +
 +static struct ref *get_refs_from_bundle(const struct transport *transport)
 +{
 +      struct bundle_transport_data *data = transport->data;
 +      struct ref *result = NULL;
 +      int i;
 +
 +      if (data->fd > 0)
 +              close(data->fd);
 +      data->fd = read_bundle_header(transport->url, &data->header);
 +      if (data->fd < 0)
 +              die ("Could not read bundle '%s'.", transport->url);
 +      for (i = 0; i < data->header.references.nr; i++) {
 +              struct ref_list_entry *e = data->header.references.list + i;
 +              struct ref *ref = alloc_ref(strlen(e->name) + 1);
 +              hashcpy(ref->old_sha1, e->sha1);
 +              strcpy(ref->name, e->name);
 +              ref->next = result;
 +              result = ref;
 +      }
 +      return result;
 +}
 +
 +static int fetch_refs_from_bundle(struct transport *transport,
 +                             int nr_heads, struct ref **to_fetch)
 +{
 +      struct bundle_transport_data *data = transport->data;
 +      return unbundle(&data->header, data->fd);
 +}
 +
 +static int close_bundle(struct transport *transport)
 +{
 +      struct bundle_transport_data *data = transport->data;
 +      if (data->fd > 0)
 +              close(data->fd);
 +      free(data);
 +      return 0;
 +}
 +
 +struct git_transport_data {
 +      unsigned thin : 1;
 +      unsigned keep : 1;
 +      int depth;
 +      const char *uploadpack;
 +      const char *receivepack;
 +};
 +
 +static int set_git_option(struct transport *connection,
 +                        const char *name, const char *value)
 +{
 +      struct git_transport_data *data = connection->data;
 +      if (!strcmp(name, TRANS_OPT_UPLOADPACK)) {
 +              data->uploadpack = value;
 +              return 0;
 +      } else if (!strcmp(name, TRANS_OPT_RECEIVEPACK)) {
 +              data->receivepack = value;
 +              return 0;
 +      } else if (!strcmp(name, TRANS_OPT_THIN)) {
 +              data->thin = !!value;
 +              return 0;
 +      } else if (!strcmp(name, TRANS_OPT_KEEP)) {
 +              data->keep = !!value;
 +              return 0;
 +      } else if (!strcmp(name, TRANS_OPT_DEPTH)) {
 +              if (!value)
 +                      data->depth = 0;
 +              else
 +                      data->depth = atoi(value);
 +              return 0;
 +      }
 +      return 1;
 +}
 +
 +static struct ref *get_refs_via_connect(const struct transport *transport)
 +{
 +      struct git_transport_data *data = transport->data;
 +      struct ref *refs;
 +      int fd[2];
 +      pid_t pid;
 +      char *dest = xstrdup(transport->url);
 +
 +      pid = git_connect(fd, dest, data->uploadpack, 0);
 +
 +      if (pid < 0)
 +              die("Failed to connect to \"%s\"", transport->url);
 +
 +      get_remote_heads(fd[0], &refs, 0, NULL, 0);
 +      packet_flush(fd[1]);
 +
 +      finish_connect(pid);
 +
 +      free(dest);
 +
 +      return refs;
 +}
 +
 +static int fetch_refs_via_pack(struct transport *transport,
 +                             int nr_heads, struct ref **to_fetch)
 +{
 +      struct git_transport_data *data = transport->data;
 +      char **heads = xmalloc(nr_heads * sizeof(*heads));
 +      char **origh = xmalloc(nr_heads * sizeof(*origh));
 +      struct ref *refs;
 +      char *dest = xstrdup(transport->url);
 +      struct fetch_pack_args args;
 +      int i;
 +
 +      memset(&args, 0, sizeof(args));
 +      args.uploadpack = data->uploadpack;
 +      args.keep_pack = data->keep;
 +      args.lock_pack = 1;
 +      args.use_thin_pack = data->thin;
 +      args.verbose = transport->verbose > 0;
 +      args.depth = data->depth;
 +
 +      for (i = 0; i < nr_heads; i++)
 +              origh[i] = heads[i] = xstrdup(to_fetch[i]->name);
 +      refs = fetch_pack(&args, dest, nr_heads, heads, &transport->pack_lockfile);
 +
 +      for (i = 0; i < nr_heads; i++)
 +              free(origh[i]);
 +      free(origh);
 +      free(heads);
 +      free_refs(refs);
 +      free(dest);
 +      return 0;
 +}
 +
 +static int git_transport_push(struct transport *transport, int refspec_nr, const char **refspec, int flags) {
 +      struct git_transport_data *data = transport->data;
 +      const char **argv;
 +      char *rem;
 +      int argc;
 +      int err;
 +
 +      argv = xmalloc((refspec_nr + 11) * sizeof(char *));
 +      argv[0] = "send-pack";
 +      argc = 1;
 +      if (flags & TRANSPORT_PUSH_ALL)
 +              argv[argc++] = "--all";
 +      if (flags & TRANSPORT_PUSH_FORCE)
 +              argv[argc++] = "--force";
++      if (flags & TRANSPORT_PUSH_DRY_RUN)
++              argv[argc++] = "--dry-run";
 +      if (data->receivepack) {
 +              char *rp = xmalloc(strlen(data->receivepack) + 16);
 +              sprintf(rp, "--receive-pack=%s", data->receivepack);
 +              argv[argc++] = rp;
 +      }
 +      if (data->thin)
 +              argv[argc++] = "--thin";
 +      rem = xmalloc(strlen(transport->remote->name) + 10);
 +      sprintf(rem, "--remote=%s", transport->remote->name);
 +      argv[argc++] = rem;
 +      argv[argc++] = transport->url;
 +      while (refspec_nr--)
 +              argv[argc++] = *refspec++;
 +      argv[argc] = NULL;
 +      err = run_command_v_opt(argv, RUN_GIT_CMD);
 +      switch (err) {
 +      case -ERR_RUN_COMMAND_FORK:
 +              error("unable to fork for %s", argv[0]);
 +      case -ERR_RUN_COMMAND_EXEC:
 +              error("unable to exec %s", argv[0]);
 +              break;
 +      case -ERR_RUN_COMMAND_WAITPID:
 +      case -ERR_RUN_COMMAND_WAITPID_WRONG_PID:
 +      case -ERR_RUN_COMMAND_WAITPID_SIGNAL:
 +      case -ERR_RUN_COMMAND_WAITPID_NOEXIT:
 +              error("%s died with strange error", argv[0]);
 +      }
 +      return !!err;
 +}
 +
 +static int disconnect_git(struct transport *transport)
 +{
 +      free(transport->data);
 +      return 0;
 +}
 +
 +static int is_local(const char *url)
 +{
 +      const char *colon = strchr(url, ':');
 +      const char *slash = strchr(url, '/');
 +      return !colon || (slash && slash < colon);
 +}
 +
 +static int is_file(const char *url)
 +{
 +      struct stat buf;
 +      if (stat(url, &buf))
 +              return 0;
 +      return S_ISREG(buf.st_mode);
 +}
 +
 +struct transport *transport_get(struct remote *remote, const char *url)
 +{
 +      struct transport *ret = xcalloc(1, sizeof(*ret));
 +
 +      ret->remote = remote;
 +      ret->url = url;
 +
 +      if (!prefixcmp(url, "rsync://")) {
 +              ret->get_refs_list = get_refs_via_rsync;
 +              ret->fetch = fetch_objs_via_rsync;
 +              ret->push = rsync_transport_push;
 +
 +      } else if (!prefixcmp(url, "http://")
 +              || !prefixcmp(url, "https://")
 +              || !prefixcmp(url, "ftp://")) {
 +#ifdef NO_CURL
 +              error("git was compiled without libcurl support.");
 +#else
 +              ret->get_refs_list = get_refs_via_curl;
 +              ret->fetch = fetch_objs_via_curl;
 +              ret->push = curl_transport_push;
 +#endif
 +              ret->disconnect = disconnect_walker;
 +
 +      } else if (is_local(url) && is_file(url)) {
 +              struct bundle_transport_data *data = xcalloc(1, sizeof(*data));
 +              ret->data = data;
 +              ret->get_refs_list = get_refs_from_bundle;
 +              ret->fetch = fetch_refs_from_bundle;
 +              ret->disconnect = close_bundle;
 +
 +      } else {
 +              struct git_transport_data *data = xcalloc(1, sizeof(*data));
 +              ret->data = data;
 +              ret->set_option = set_git_option;
 +              ret->get_refs_list = get_refs_via_connect;
 +              ret->fetch = fetch_refs_via_pack;
 +              ret->push = git_transport_push;
 +              ret->disconnect = disconnect_git;
 +
 +              data->thin = 1;
 +              data->uploadpack = "git-upload-pack";
 +              if (remote && remote->uploadpack)
 +                      data->uploadpack = remote->uploadpack;
 +              data->receivepack = "git-receive-pack";
 +              if (remote && remote->receivepack)
 +                      data->receivepack = remote->receivepack;
 +      }
 +
 +      return ret;
 +}
 +
 +int transport_set_option(struct transport *transport,
 +                       const char *name, const char *value)
 +{
 +      if (transport->set_option)
 +              return transport->set_option(transport, name, value);
 +      return 1;
 +}
 +
 +int transport_push(struct transport *transport,
 +                 int refspec_nr, const char **refspec, int flags)
 +{
 +      if (!transport->push)
 +              return 1;
 +      return transport->push(transport, refspec_nr, refspec, flags);
 +}
 +
 +struct ref *transport_get_remote_refs(struct transport *transport)
 +{
 +      if (!transport->remote_refs)
 +              transport->remote_refs = transport->get_refs_list(transport);
 +      return transport->remote_refs;
 +}
 +
 +int transport_fetch_refs(struct transport *transport, struct ref *refs)
 +{
 +      int rc;
 +      int nr_heads = 0, nr_alloc = 0;
 +      struct ref **heads = NULL;
 +      struct ref *rm;
 +
 +      for (rm = refs; rm; rm = rm->next) {
 +              if (rm->peer_ref &&
 +                  !hashcmp(rm->peer_ref->old_sha1, rm->old_sha1))
 +                      continue;
 +              ALLOC_GROW(heads, nr_heads + 1, nr_alloc);
 +              heads[nr_heads++] = rm;
 +      }
 +
 +      rc = transport->fetch(transport, nr_heads, heads);
 +      free(heads);
 +      return rc;
 +}
 +
 +void transport_unlock_pack(struct transport *transport)
 +{
 +      if (transport->pack_lockfile) {
 +              unlink(transport->pack_lockfile);
 +              free(transport->pack_lockfile);
 +              transport->pack_lockfile = NULL;
 +      }
 +}
 +
 +int transport_disconnect(struct transport *transport)
 +{
 +      int ret = 0;
 +      if (transport->disconnect)
 +              ret = transport->disconnect(transport);
 +      free(transport);
 +      return ret;
 +}
diff --cc transport.h
index 4bb51d7b7f572b2f324e3ba8d1068bc4f5883b1d,0000000000000000000000000000000000000000..df12ea74243cab68cc13eecf61106bba8ace1be4
mode 100644,000000..100644
--- /dev/null
@@@ -1,69 -1,0 +1,70 @@@
 +#ifndef TRANSPORT_H
 +#define TRANSPORT_H
 +
 +#include "cache.h"
 +#include "remote.h"
 +
 +struct transport {
 +      struct remote *remote;
 +      const char *url;
 +      void *data;
 +      struct ref *remote_refs;
 +
 +      /**
 +       * Returns 0 if successful, positive if the option is not
 +       * recognized or is inapplicable, and negative if the option
 +       * is applicable but the value is invalid.
 +       **/
 +      int (*set_option)(struct transport *connection, const char *name,
 +                        const char *value);
 +
 +      struct ref *(*get_refs_list)(const struct transport *transport);
 +      int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs);
 +      int (*push)(struct transport *connection, int refspec_nr, const char **refspec, int flags);
 +
 +      int (*disconnect)(struct transport *connection);
 +      char *pack_lockfile;
 +      signed verbose : 2;
 +};
 +
 +#define TRANSPORT_PUSH_ALL 1
 +#define TRANSPORT_PUSH_FORCE 2
++#define TRANSPORT_PUSH_DRY_RUN 4
 +
 +/* Returns a transport suitable for the url */
 +struct transport *transport_get(struct remote *, const char *);
 +
 +/* Transport options which apply to git:// and scp-style URLs */
 +
 +/* The program to use on the remote side to send a pack */
 +#define TRANS_OPT_UPLOADPACK "uploadpack"
 +
 +/* The program to use on the remote side to receive a pack */
 +#define TRANS_OPT_RECEIVEPACK "receivepack"
 +
 +/* Transfer the data as a thin pack if not null */
 +#define TRANS_OPT_THIN "thin"
 +
 +/* Keep the pack that was transferred if not null */
 +#define TRANS_OPT_KEEP "keep"
 +
 +/* Limit the depth of the fetch if not null */
 +#define TRANS_OPT_DEPTH "depth"
 +
 +/**
 + * Returns 0 if the option was used, non-zero otherwise. Prints a
 + * message to stderr if the option is not used.
 + **/
 +int transport_set_option(struct transport *transport, const char *name,
 +                       const char *value);
 +
 +int transport_push(struct transport *connection,
 +                 int refspec_nr, const char **refspec, int flags);
 +
 +struct ref *transport_get_remote_refs(struct transport *transport);
 +
 +int transport_fetch_refs(struct transport *transport, struct ref *refs);
 +void transport_unlock_pack(struct transport *transport);
 +int transport_disconnect(struct transport *transport);
 +
 +#endif
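
To make the API above easier to read in isolation, here is a hedged sketch of a fetch-side caller.  It is hypothetical and not part of this commit; it assumes only the declarations in transport.h as merged above, and fetch_everything_sketch() plus its error handling are invented for illustration.

/*
 * Hypothetical illustration of the transport API above; compiles only
 * inside a git tree of this era.
 */
#include "cache.h"
#include "remote.h"
#include "transport.h"

static int fetch_everything_sketch(struct remote *remote, const char *url)
{
        struct transport *transport = transport_get(remote, url);
        struct ref *refs;
        int err;

        /* a non-zero return means this transport did not use the option */
        if (transport_set_option(transport, TRANS_OPT_DEPTH, "1"))
                warning("this transport does not support shallow fetches");

        refs = transport_get_remote_refs(transport); /* roughly ls-remote */
        err = transport_fetch_refs(transport, refs); /* download the objects */

        transport_unlock_pack(transport);            /* drop the pack lockfile, if any */
        return transport_disconnect(transport) || err;
}
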
diff --cc walker.c
index 5c65ea494de149d006d726484032f1b44f45a511,0000000000000000000000000000000000000000..397b80de9e949ea7d70c723f7a58c9ffdaf0a168
mode 100644,000000..100644
--- /dev/null
+++ b/walker.c
@@@ -1,318 -1,0 +1,317 @@@
- #include "strbuf.h"
 +#include "cache.h"
 +#include "walker.h"
 +#include "commit.h"
 +#include "tree.h"
 +#include "tree-walk.h"
 +#include "tag.h"
 +#include "blob.h"
 +#include "refs.h"
-       strbuf_init(&buf);
 +
 +static unsigned char current_commit_sha1[20];
 +
 +void walker_say(struct walker *walker, const char *fmt, const char *hex)
 +{
 +      if (walker->get_verbosely)
 +              fprintf(stderr, fmt, hex);
 +}
 +
 +static void report_missing(const struct object *obj)
 +{
 +      char missing_hex[41];
 +      strcpy(missing_hex, sha1_to_hex(obj->sha1));;
 +      fprintf(stderr, "Cannot obtain needed %s %s\n",
 +              obj->type ? typename(obj->type): "object", missing_hex);
 +      if (!is_null_sha1(current_commit_sha1))
 +              fprintf(stderr, "while processing commit %s.\n",
 +                      sha1_to_hex(current_commit_sha1));
 +}
 +
 +static int process(struct walker *walker, struct object *obj);
 +
 +static int process_tree(struct walker *walker, struct tree *tree)
 +{
 +      struct tree_desc desc;
 +      struct name_entry entry;
 +
 +      if (parse_tree(tree))
 +              return -1;
 +
 +      init_tree_desc(&desc, tree->buffer, tree->size);
 +      while (tree_entry(&desc, &entry)) {
 +              struct object *obj = NULL;
 +
 +              /* submodule commits are not stored in the superproject */
 +              if (S_ISGITLINK(entry.mode))
 +                      continue;
 +              if (S_ISDIR(entry.mode)) {
 +                      struct tree *tree = lookup_tree(entry.sha1);
 +                      if (tree)
 +                              obj = &tree->object;
 +              }
 +              else {
 +                      struct blob *blob = lookup_blob(entry.sha1);
 +                      if (blob)
 +                              obj = &blob->object;
 +              }
 +              if (!obj || process(walker, obj))
 +                      return -1;
 +      }
 +      free(tree->buffer);
 +      tree->buffer = NULL;
 +      tree->size = 0;
 +      return 0;
 +}
 +
 +#define COMPLETE      (1U << 0)
 +#define SEEN          (1U << 1)
 +#define TO_SCAN               (1U << 2)
 +
 +static struct commit_list *complete = NULL;
 +
 +static int process_commit(struct walker *walker, struct commit *commit)
 +{
 +      if (parse_commit(commit))
 +              return -1;
 +
 +      while (complete && complete->item->date >= commit->date) {
 +              pop_most_recent_commit(&complete, COMPLETE);
 +      }
 +
 +      if (commit->object.flags & COMPLETE)
 +              return 0;
 +
 +      hashcpy(current_commit_sha1, commit->object.sha1);
 +
 +      walker_say(walker, "walk %s\n", sha1_to_hex(commit->object.sha1));
 +
 +      if (walker->get_tree) {
 +              if (process(walker, &commit->tree->object))
 +                      return -1;
 +              if (!walker->get_all)
 +                      walker->get_tree = 0;
 +      }
 +      if (walker->get_history) {
 +              struct commit_list *parents = commit->parents;
 +              for (; parents; parents = parents->next) {
 +                      if (process(walker, &parents->item->object))
 +                              return -1;
 +              }
 +      }
 +      return 0;
 +}
 +
 +static int process_tag(struct walker *walker, struct tag *tag)
 +{
 +      if (parse_tag(tag))
 +              return -1;
 +      return process(walker, tag->tagged);
 +}
 +
 +static struct object_list *process_queue = NULL;
 +static struct object_list **process_queue_end = &process_queue;
 +
 +static int process_object(struct walker *walker, struct object *obj)
 +{
 +      if (obj->type == OBJ_COMMIT) {
 +              if (process_commit(walker, (struct commit *)obj))
 +                      return -1;
 +              return 0;
 +      }
 +      if (obj->type == OBJ_TREE) {
 +              if (process_tree(walker, (struct tree *)obj))
 +                      return -1;
 +              return 0;
 +      }
 +      if (obj->type == OBJ_BLOB) {
 +              return 0;
 +      }
 +      if (obj->type == OBJ_TAG) {
 +              if (process_tag(walker, (struct tag *)obj))
 +                      return -1;
 +              return 0;
 +      }
 +      return error("Unable to determine requirements "
 +                   "of type %s for %s",
 +                   typename(obj->type), sha1_to_hex(obj->sha1));
 +}
 +
 +static int process(struct walker *walker, struct object *obj)
 +{
 +      if (obj->flags & SEEN)
 +              return 0;
 +      obj->flags |= SEEN;
 +
 +      if (has_sha1_file(obj->sha1)) {
 +              /* We already have it, so we should scan it now. */
 +              obj->flags |= TO_SCAN;
 +      }
 +      else {
 +              if (obj->flags & COMPLETE)
 +                      return 0;
 +              walker->prefetch(walker, obj->sha1);
 +      }
 +
 +      object_list_insert(obj, process_queue_end);
 +      process_queue_end = &(*process_queue_end)->next;
 +      return 0;
 +}
 +
 +static int loop(struct walker *walker)
 +{
 +      struct object_list *elem;
 +
 +      while (process_queue) {
 +              struct object *obj = process_queue->item;
 +              elem = process_queue;
 +              process_queue = elem->next;
 +              free(elem);
 +              if (!process_queue)
 +                      process_queue_end = &process_queue;
 +
 +              /* If we are not scanning this object, we placed it in
 +               * the queue because we needed to fetch it first.
 +               */
 +              if (! (obj->flags & TO_SCAN)) {
 +                      if (walker->fetch(walker, obj->sha1)) {
 +                              report_missing(obj);
 +                              return -1;
 +                      }
 +              }
 +              if (!obj->type)
 +                      parse_object(obj->sha1);
 +              if (process_object(walker, obj))
 +                      return -1;
 +      }
 +      return 0;
 +}
 +
 +static int interpret_target(struct walker *walker, char *target, unsigned char *sha1)
 +{
 +      if (!get_sha1_hex(target, sha1))
 +              return 0;
 +      if (!check_ref_format(target)) {
 +              if (!walker->fetch_ref(walker, target, sha1)) {
 +                      return 0;
 +              }
 +      }
 +      return -1;
 +}
 +
 +static int mark_complete(const char *path, const unsigned char *sha1, int flag, void *cb_data)
 +{
 +      struct commit *commit = lookup_commit_reference_gently(sha1, 1);
 +      if (commit) {
 +              commit->object.flags |= COMPLETE;
 +              insert_by_date(commit, &complete);
 +      }
 +      return 0;
 +}
 +
 +int walker_targets_stdin(char ***target, const char ***write_ref)
 +{
 +      int targets = 0, targets_alloc = 0;
 +      struct strbuf buf;
 +      *target = NULL; *write_ref = NULL;
-               read_line(&buf, stdin, '\n');
-               if (buf.eof)
++      strbuf_init(&buf, 0);
 +      while (1) {
 +              char *rf_one = NULL;
 +              char *tg_one;
 +
++              if (strbuf_getline(&buf, stdin, '\n') == EOF)
 +                      break;
 +              tg_one = buf.buf;
 +              rf_one = strchr(tg_one, '\t');
 +              if (rf_one)
 +                      *rf_one++ = 0;
 +
 +              if (targets >= targets_alloc) {
 +                      targets_alloc = targets_alloc ? targets_alloc * 2 : 64;
 +                      *target = xrealloc(*target, targets_alloc * sizeof(**target));
 +                      *write_ref = xrealloc(*write_ref, targets_alloc * sizeof(**write_ref));
 +              }
 +              (*target)[targets] = xstrdup(tg_one);
 +              (*write_ref)[targets] = rf_one ? xstrdup(rf_one) : NULL;
 +              targets++;
 +      }
++      strbuf_release(&buf);
 +      return targets;
 +}
 +
 +void walker_targets_free(int targets, char **target, const char **write_ref)
 +{
 +      while (targets--) {
 +              free(target[targets]);
 +              if (write_ref && write_ref[targets])
 +                      free((char *) write_ref[targets]);
 +      }
 +}
 +
 +int walker_fetch(struct walker *walker, int targets, char **target,
 +               const char **write_ref, const char *write_ref_log_details)
 +{
 +      struct ref_lock **lock = xcalloc(targets, sizeof(struct ref_lock *));
 +      unsigned char *sha1 = xmalloc(targets * 20);
 +      char *msg;
 +      int ret;
 +      int i;
 +
 +      save_commit_buffer = 0;
 +      track_object_refs = 0;
 +
 +      for (i = 0; i < targets; i++) {
 +              if (!write_ref || !write_ref[i])
 +                      continue;
 +
 +              lock[i] = lock_ref_sha1(write_ref[i], NULL);
 +              if (!lock[i]) {
 +                      error("Can't lock ref %s", write_ref[i]);
 +                      goto unlock_and_fail;
 +              }
 +      }
 +
 +      if (!walker->get_recover)
 +              for_each_ref(mark_complete, NULL);
 +
 +      for (i = 0; i < targets; i++) {
 +              if (interpret_target(walker, target[i], &sha1[20 * i])) {
 +                      error("Could not interpret %s as something to pull", target[i]);
 +                      goto unlock_and_fail;
 +              }
 +              if (process(walker, lookup_unknown_object(&sha1[20 * i])))
 +                      goto unlock_and_fail;
 +      }
 +
 +      if (loop(walker))
 +              goto unlock_and_fail;
 +
 +      if (write_ref_log_details) {
 +              msg = xmalloc(strlen(write_ref_log_details) + 12);
 +              sprintf(msg, "fetch from %s", write_ref_log_details);
 +      } else {
 +              msg = NULL;
 +      }
 +      for (i = 0; i < targets; i++) {
 +              if (!write_ref || !write_ref[i])
 +                      continue;
 +              ret = write_ref_sha1(lock[i], &sha1[20 * i], msg ? msg : "fetch (unknown)");
 +              lock[i] = NULL;
 +              if (ret)
 +                      goto unlock_and_fail;
 +      }
 +      free(msg);
 +
 +      return 0;
 +
 +unlock_and_fail:
 +      for (i = 0; i < targets; i++)
 +              if (lock[i])
 +                      unlock_ref(lock[i]);
 +
 +      return -1;
 +}
 +
 +void walker_free(struct walker *walker)
 +{
 +      walker->cleanup(walker);
 +      free(walker);
 +}
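
Finally, since walker.c above only defines the engine, here is a hedged sketch of how a caller might drive it.  It is purely illustrative: fetch_one_sketch() is a hypothetical name, the walker is assumed to come from an existing constructor such as the http walker used by transport.c, and nothing here is part of this commit.

/*
 * Hypothetical driver for the commit walker above, for illustration only.
 */
#include "cache.h"
#include "walker.h"

static int fetch_one_sketch(struct walker *walker, char *target,
                            const char *write_ref)
{
        int ret;

        walker->get_tree = 1;      /* fetch the trees ... */
        walker->get_history = 1;   /* ... and the ancestor commits */
        walker->get_verbosely = 0; /* keep walker_say() quiet */

        /* resolve "target" (ref name or sha1), walk it, then update "write_ref" */
        ret = walker_fetch(walker, 1, &target, &write_ref, NULL);

        walker_free(walker);
        return ret;
}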