diff --git a/block-sha1/sha1.c b/block-sha1/sha1.c
index c3f1ae59b..d8934757a 100644
--- a/block-sha1/sha1.c
+++ b/block-sha1/sha1.c
@@ -1,20 +1,28 @@
 /*
- * Based on the Mozilla SHA1 (see mozilla-sha1/sha1.c),
- * optimized to do word accesses rather than byte accesses,
+ * SHA1 routine optimized to do word accesses rather than byte accesses,
  * and to avoid unnecessary copies into the context array.
+ *
+ * This was initially based on the Mozilla SHA1 implementation, although
+ * none of the original Mozilla code remains.
  */
 
-#include <string.h>
-#include <arpa/inet.h>
+/* this is only to get definitions for memcpy(), ntohl() and htonl() */
+#include "../git-compat-util.h"
 
 #include "sha1.h"
 
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+
+/*
+ * Force usage of rol or ror by selecting the one with the smaller constant.
+ * It _can_ generate slightly smaller code (a constant of 1 is special), but
+ * perhaps more importantly it's possibly faster on any uarch that does a
+ * rotate with a loop.
+ */
 
 #define SHA_ASM(op, x, n) ({ unsigned int __res; __asm__(op " %1,%0":"=r" (__res):"i" (n), "0" (x)); __res; })
 #define SHA_ROL(x,n)	SHA_ASM("rol", x, n)
 #define SHA_ROR(x,n)	SHA_ASM("ror", x, n)
 
-#define SMALL_REGISTER_SET
-
 #else
 
@@ -24,9 +32,6 @@
 
 #endif
 
-/* This "rolls" over the 512-bit array */
-#define W(x) (array[(x)&15])
-
 /*
  * If you have 32 registers or more, the compiler can (and should)
  * try to change the array[] accesses into registers. However, on
@@ -43,18 +48,59 @@
  * Ben Herrenschmidt reports that on PPC, the C version comes close
  * to the optimized asm with this (i.e. on PPC you don't want that
  * 'volatile', since there are lots of registers).
+ *
+ * On ARM we get the best code generation by forcing a full memory barrier
+ * between each SHA_ROUND; otherwise gcc happily gets wild with spilling and
+ * the stack frame size simply explodes and performance goes down the drain.
  */
-#ifdef SMALL_REGISTER_SET
+
+#if defined(__i386__) || defined(__x86_64__)
   #define setW(x, val) (*(volatile unsigned int *)&W(x) = (val))
+#elif defined(__GNUC__) && defined(__arm__)
+  #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
 #else
   #define setW(x, val) (W(x) = (val))
 #endif
+
+/*
+ * Performance might be improved if the CPU architecture is OK with
+ * unaligned 32-bit loads and a fast ntohl() is available.
+ * Otherwise fall back to byte loads and shifts, which is portable
+ * and faster on architectures with memory alignment issues.
+ */
+
+#if defined(__i386__) || defined(__x86_64__) || \
+    defined(__ppc__) || defined(__ppc64__) || \
+    defined(__powerpc__) || defined(__powerpc64__) || \
+    defined(__s390__) || defined(__s390x__)
+
+#define get_be32(p)	ntohl(*(unsigned int *)(p))
+#define put_be32(p, v)	do { *(unsigned int *)(p) = htonl(v); } while (0)
+
+#else
+
+#define get_be32(p)	( \
+	(*((unsigned char *)(p) + 0) << 24) | \
+	(*((unsigned char *)(p) + 1) << 16) | \
+	(*((unsigned char *)(p) + 2) <<  8) | \
+	(*((unsigned char *)(p) + 3) <<  0) )
+#define put_be32(p, v)	do { \
+	unsigned int __v = (v); \
+	*((unsigned char *)(p) + 0) = __v >> 24; \
+	*((unsigned char *)(p) + 1) = __v >> 16; \
+	*((unsigned char *)(p) + 2) = __v >>  8; \
+	*((unsigned char *)(p) + 3) = __v >>  0; } while (0)
+
+#endif
+
+/* This "rolls" over the 512-bit array */
+#define W(x) (array[(x)&15])
+
 /*
  * Where do we get the source from? The first 16 iterations get it from
  * the input data, the next 64 mix it from the 512-bit array.
  */
-#define SHA_SRC(t) htonl(data[t])
+#define SHA_SRC(t) get_be32(data + t)
 #define SHA_MIX(t) SHA_ROL(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
 
 #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
@@ -202,14 +248,14 @@ void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *data, unsigned long len)
 		memcpy(lenW + (char *)ctx->W, data, left);
 		lenW = (lenW + left) & 63;
 		len -= left;
-		data += left;
+		data = ((const char *)data + left);
 		if (lenW)
 			return;
 		blk_SHA1_Block(ctx, ctx->W);
 	}
 	while (len >= 64) {
 		blk_SHA1_Block(ctx, data);
-		data += 64;
+		data = ((const char *)data + 64);
 		len -= 64;
 	}
 	if (len)
@@ -232,5 +278,5 @@ void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx)
 
 	/* Output hash */
 	for (i = 0; i < 5; i++)
-		((unsigned int *)hashout)[i] = htonl(ctx->H[i]);
+		put_be32(hashout + i*4, ctx->H[i]);
 }
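
The byte-shift fallback above is easy to sanity-check in isolation. Below is a
minimal standalone sketch: the get_be32()/put_be32() macros are copied from the
fallback branch of this patch, while the main() harness and the test value
0x12345678 are illustrative only. The round trip must produce the bytes
12 34 56 78 and return the original word on both big- and little-endian hosts,
because no native-endian 32-bit load or store is ever issued:

#include <stdio.h>

/* Byte-shift big-endian accessors, as in the patch's fallback branch:
 * per-byte loads and stores avoid both unaligned 32-bit accesses and
 * any dependence on host endianness. */
#define get_be32(p)	( \
	(*((unsigned char *)(p) + 0) << 24) | \
	(*((unsigned char *)(p) + 1) << 16) | \
	(*((unsigned char *)(p) + 2) <<  8) | \
	(*((unsigned char *)(p) + 3) <<  0) )
#define put_be32(p, v)	do { \
	unsigned int __v = (v); \
	*((unsigned char *)(p) + 0) = __v >> 24; \
	*((unsigned char *)(p) + 1) = __v >> 16; \
	*((unsigned char *)(p) + 2) = __v >>  8; \
	*((unsigned char *)(p) + 3) = __v >>  0; } while (0)

int main(void)
{
	unsigned char buf[4];

	/* Store 0x12345678 big-endian: buf must read 12 34 56 78. */
	put_be32(buf, 0x12345678u);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);

	/* Load it back; the round trip returns the original value
	 * regardless of the host's native byte order. */
	printf("%08x\n", (unsigned int)get_be32(buf));
	return 0;
}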
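
On the x86 side, SHA_ASM() hard-codes a rol or ror instruction, and the hunk at
@@ -24,9 +32,6 @@ leaves the non-x86 #else branch out of view. For reference, a
portable rotate fallback conventionally looks like the sketch below; the
SHA_ROT/SHA_ROL/SHA_ROR names and the 1..31 constraint are assumptions here,
not taken from this file. The point is just that rotating left by n equals
rotating right by 32-n, which is what lets the asm path pick whichever rotate
has the smaller constant:

#include <assert.h>
#include <stdio.h>

/* Plain-C 32-bit rotate (assumed fallback, not copied from this file).
 * n must stay in 1..31 so neither shift count hits 0 or 32, both of
 * which would be undefined behavior for a 32-bit operand. */
#define SHA_ROT(X, l, r)	(((X) << (l)) | ((X) >> (r)))
#define SHA_ROL(X, n)	SHA_ROT(X, n, 32 - (n))
#define SHA_ROR(X, n)	SHA_ROT(X, 32 - (n), n)

int main(void)
{
	unsigned int x = 0x80000001u;

	/* A left rotate by n and a right rotate by 32-n must agree. */
	assert(SHA_ROL(x, 1) == SHA_ROR(x, 31));
	assert(SHA_ROL(x, 1) == 0x00000003u);
	printf("%08x %08x\n", SHA_ROL(x, 5), SHA_ROR(x, 27));
	return 0;
}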