Diffstat (limited to 'block-sha1/sha1.c')
-rw-r--r--   block-sha1/sha1.c   36
1 file changed, 2 insertions, 34 deletions
diff --git a/block-sha1/sha1.c b/block-sha1/sha1.c
index a8d4bf9..22b125c 100644
--- a/block-sha1/sha1.c
+++ b/block-sha1/sha1.c
@@ -62,38 +62,6 @@
#define setW(x, val) (W(x) = (val))
#endif
-/*
- * Performance might be improved if the CPU architecture is OK with
- * unaligned 32-bit loads and a fast ntohl() is available.
- * Otherwise fall back to byte loads and shifts which is portable,
- * and is faster on architectures with memory alignment issues.
- */
-
-#if defined(__i386__) || defined(__x86_64__) || \
-	defined(_M_IX86) || defined(_M_X64) || \
-	defined(__ppc__) || defined(__ppc64__) || \
-	defined(__powerpc__) || defined(__powerpc64__) || \
-	defined(__s390__) || defined(__s390x__)
-
-#define get_be32(p) ntohl(*(unsigned int *)(p))
-#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
-
-#else
-
-#define get_be32(p) ( \
-	(*((unsigned char *)(p) + 0) << 24) | \
-	(*((unsigned char *)(p) + 1) << 16) | \
-	(*((unsigned char *)(p) + 2) << 8) | \
-	(*((unsigned char *)(p) + 3) << 0) )
-#define put_be32(p, v) do { \
-	unsigned int __v = (v); \
-	*((unsigned char *)(p) + 0) = __v >> 24; \
-	*((unsigned char *)(p) + 1) = __v >> 16; \
-	*((unsigned char *)(p) + 2) = __v >> 8; \
-	*((unsigned char *)(p) + 3) = __v >> 0; } while (0)
-
-#endif
-
/* This "rolls" over the 512-bit array */
#define W(x) (array[(x)&15])
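
The block removed above is the big-endian load/store layer: on architectures listed as tolerating unaligned 32-bit accesses it casts the pointer and uses ntohl()/htonl(), otherwise it assembles the word byte by byte. The diff does not show where these helpers now live, so the following is only a rough standalone sketch of the portable byte-wise variant (the function names mirror the macros, but the program itself is illustrative):

#include <stdio.h>

/* Read a 32-bit big-endian value one byte at a time, with no alignment
 * or host-endianness assumptions. */
static unsigned int get_be32(const unsigned char *p)
{
	return (unsigned int)p[0] << 24 |
	       (unsigned int)p[1] << 16 |
	       (unsigned int)p[2] << 8 |
	       (unsigned int)p[3];
}

/* Write a 32-bit value in big-endian byte order. */
static void put_be32(unsigned char *p, unsigned int v)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}

int main(void)
{
	unsigned char buf[4];

	put_be32(buf, 0x12345678u);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	printf("%08x\n", get_be32(buf));
	return 0;
}

On any host the round trip prints "12 34 56 78" followed by "12345678", which is the portability the fallback branch buys at the cost of a few extra shifts.
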
@@ -274,10 +242,10 @@ void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx)
padlen[1] = htonl((uint32_t)(ctx->size << 3));
i = ctx->size & 63;
- blk_SHA1_Update(ctx, pad, 1+ (63 & (55 - i)));
+ blk_SHA1_Update(ctx, pad, 1 + (63 & (55 - i)));
blk_SHA1_Update(ctx, padlen, 8);
/* Output hash */
for (i = 0; i < 5; i++)
- put_be32(hashout + i*4, ctx->H[i]);
+ put_be32(hashout + i * 4, ctx->H[i]);
}
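
The second hunk is whitespace-only, but the padding expression it touches rewards a closer look: blk_SHA1_Final() feeds in one 0x80 byte plus zeros until the buffer offset reaches 56 mod 64, so that the 8-byte encoded bit length then completes a 64-byte block. A small self-contained check of that arithmetic (the loop and names here are illustrative, not part of the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int i;

	for (i = 0; i < 64; i++) {
		/* i plays the role of ctx->size & 63: the current offset
		 * within the 64-byte block. */
		unsigned int padlen = 1 + (63 & (55 - i));
		assert((i + padlen) % 64 == 56);
	}
	printf("padding always lands on offset 56 of a 64-byte block\n");
	return 0;
}

In the sketch i is unsigned, so 55 - i wraps modulo 2^32 for offsets past 55; masking with 63 still yields the distance to offset 56, since 2^32 is a multiple of 64.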