/* crypto/md32_common.h */
/* ====================================================================
- * Copyright (c) 1999 The OpenSSL Project. All rights reserved.
+ * Copyright (c) 1999-2002 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
*/
#undef ROTATE
#ifndef PEDANTIC
-# if defined(_MSC_VER)
+# if defined(_MSC_VER) || defined(__ICC)
# define ROTATE(a,n) _lrotl(a,n)
# elif defined(__MWERKS__)
# if defined(__POWERPC__)
# define ROTATE(a,n) __rlwinm(a,n,0,31)
# else
# define ROTATE(a,n) __rol(a,n)
# endif
-# elif defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM)
+# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/*
* Some GNU C inline assembler templates. Note that these are
* rotates by *constant* number of bits! But that's exactly
*
* <appro@fy.chalmers.se>
*/
-# if defined(__i386)
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# define ROTATE(a,n) ({ register unsigned int ret; \
- asm volatile ( \
+ asm ( \
"roll %1,%0" \
: "=r"(ret) \
: "I"(n), "0"(a) \
: "cc"); \
ret; \
})
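When none of the compiler-specific templates above applies, the header
falls back to a plain shift-based rotate. A minimal sketch of that
generic definition (assuming a holds a 32-bit quantity):

    #ifndef ROTATE
    #define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
    #endif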
-# elif defined(__powerpc)
+# elif defined(__powerpc) || defined(__ppc)
# define ROTATE(a,n) ({ register unsigned int ret; \
- asm volatile ( \
+ asm ( \
"rlwinm %0,%1,%2,0,31" \
: "=r"(ret) \
: "r"(a), "I"(n)); \
* Engage compiler specific "fetch in reverse byte order"
* intrinsic function if available.
*/
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM)
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/* some GNU C inline assembler templates by <appro@fy.chalmers.se> */
-# if defined(__i386) && !defined(I386_ONLY)
+# if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY)
# define BE_FETCH32(a) ({ register unsigned int l=(a);\
- asm volatile ( \
+ asm ( \
"bswapl %0" \
: "=r"(l) : "0"(l)); \
l; \
})
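For reference, the bswapl template above computes the same result as
this portable byte-swap (an illustrative helper, not part of the
header):

    /* Illustrative only: portable equivalent of BE_FETCH32. */
    static unsigned int be_fetch32_c(unsigned int l)
        {
        return ((l&0xff000000U)>>24) | ((l&0x00ff0000U)>> 8) |
               ((l&0x0000ff00U)<< 8) | ((l&0x000000ffU)<<24);
        }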
# elif defined(__powerpc)
# define LE_FETCH32(a) ({ register unsigned int l; \
- asm volatile ( \
+ asm ( \
"lwbrx %0,0,%1" \
: "=r"(l) \
: "r"(a)); \
l; \
})
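Likewise, lwbrx loads a word with its bytes reversed; what LE_FETCH32
yields can be sketched portably as follows (hypothetical helper, for
illustration only):

    /* Illustrative only: read 4 bytes at p as a little-endian word. */
    static unsigned int le_fetch32_c(const unsigned char *p)
        {
        return  (unsigned int)p[0]      | ((unsigned int)p[1]<< 8) |
               ((unsigned int)p[2]<<16) | ((unsigned int)p[3]<<24);
        }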
-# elif defined(__sparc) && defined(ULTRASPARC)
+# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
# define LE_FETCH32(a) ({ register unsigned int l; \
- asm volatile ( \
+ asm ( \
"lda [%1]#ASI_PRIMARY_LITTLE,%0"\
: "=r"(l) \
: "r"(a)); \
* Time for some action:-)
*/
-void HASH_UPDATE (HASH_CTX *c, const unsigned char *data, unsigned long len)
+int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
{
+ const unsigned char *data=data_;
register HASH_LONG * p;
register unsigned long l;
int sw,sc,ew,ec;
- if (len==0) return;
+ if (len==0) return 1;
l=(c->Nl+(len<<3))&0xffffffffL;
/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
* Wei Dai <weidai@eskimo.com> for pointing it out. */
{
ew=(c->num>>2);
ec=(c->num&0x03);
- l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
+ if (sc)
+ l=p[sw];
+ HOST_p_c2l(data,l,sc);
+ p[sw++]=l;
for (; sw < ew; sw++)
{
HOST_c2l(data,l); p[sw]=l;
}
if (ec)
{
HOST_c2l_p(data,l,ec); p[sw]=l;
}
}
- return;
+ return 1;
}
}
if ((((unsigned long)data)%4) == 0)
{
/* data is properly aligned so that we can cast it: */
- HASH_BLOCK_DATA_ORDER_ALIGNED (c,(HASH_LONG *)data,sw);
+ HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
sw*=HASH_CBLOCK;
data+=sw;
len-=sw;
HOST_c2l_p(data,l,ec);
*p=l;
}
+ return 1;
}
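The HOST_c2l and HOST_c2l_p macros used above assemble HASH_LONG words
from the byte stream in the digest's data order. For a big-endian data
order, HOST_c2l amounts to something like this (a simplified sketch of
the real macro):

    /* Sketch only: pull four bytes from 'c', advancing it, into 'l'. */
    #define HOST_c2l_sketch(c,l) (l =(((unsigned long)(*((c)++)))<<24), \
                                  l|=(((unsigned long)(*((c)++)))<<16), \
                                  l|=(((unsigned long)(*((c)++)))<< 8), \
                                  l|=(((unsigned long)(*((c)++)))    ))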
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
if ((((unsigned long)data)%4) == 0)
/* data is properly aligned so that we can cast it: */
- HASH_BLOCK_DATA_ORDER_ALIGNED (c,(HASH_LONG *)data,1);
+ HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
else
#if !defined(HASH_BLOCK_DATA_ORDER)
{
memcpy (c->data,data,HASH_CBLOCK);
HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
}
-void HASH_FINAL (unsigned char *md, HASH_CTX *c)
+int HASH_FINAL (unsigned char *md, HASH_CTX *c)
{
register HASH_LONG *p;
register unsigned long l;
c->num=0;
/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
* but I'm not worried :-)
- memset((void *)c,0,sizeof(HASH_CTX));
+ OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
*/
+ return 1;
}
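With HASH_UPDATE and HASH_FINAL now returning int, callers of the
expanded functions can check for failure. A caller-side sketch using
the MD5 instantiation (standard MD5 API names, shown for illustration):

    #include <openssl/md5.h>

    /* Sketch: digest 'len' bytes at 'data'; 1 on success, 0 on error. */
    static int digest_buf(const void *data, unsigned long len,
                          unsigned char md[MD5_DIGEST_LENGTH])
        {
        MD5_CTX ctx;
        if (!MD5_Init(&ctx))            return 0;
        if (!MD5_Update(&ctx,data,len)) return 0;
        return MD5_Final(md,&ctx);
        }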
+
+#ifndef MD32_REG_T
+#define MD32_REG_T long
+/*
+ * This comment was originally written for MD5, which is why it
+ * discusses A-D. But it basically applies to all 32-bit digests,
+ * which is why it was moved to this common header file.
+ *
+ * In case you wonder why A-D are declared as long and not
+ * as MD5_LONG. Doing so results in a slight performance
+ * boost on LP64 architectures. The catch is we don't
+ * really care if 32 MSBs of a 64-bit register get polluted
+ * with eventual overflows as we *save* only 32 LSBs in
+ * *either* case. Now declaring 'em long excuses the compiler
+ * from keeping 32 MSBs zeroed resulting in 13% performance
+ * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
+ * Well, to be honest it should say that this *prevents*
+ * performance degradation.
+ * <appro@fy.chalmers.se>
+ * Apparently there are LP64 compilers that generate better
+ * code if A-D are declared int. Most notably GCC-x86_64
+ * generates better code.
+ * <appro@fy.chalmers.se>
+ */
+#endif
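How a digest body puts MD32_REG_T to use for its working variables,
per the comment above (a sketch in the style of md5_dgst.c; the
masking back to 32 bits is what makes the wider registers safe):

    /* Sketch: working variables may live in 64-bit registers; only
     * the 32 LSBs are stored back into the context. */
    register unsigned MD32_REG_T A,B,C,D;
    A=c->A; B=c->B; C=c->C; D=c->D;
    /* ... round operations on A-D ... */
    c->A=(A+c->A)&0xffffffffL;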