X-Git-Url: https://git.openssl.org/?p=openssl.git;a=blobdiff_plain;f=crypto%2Fbn%2Fbn_lcl.h;h=64df27aaf93e22091a5b38854a7b8a5208b312b9;hp=1c680fc3ce2f867f12adaf7f44a6886d7ad24b4a;hb=a58fdc7a346e717308984e2b6d22b1081d708b0a;hpb=46a643763de6d8e39ecf6f76fa79b4d04885aa59

diff --git a/crypto/bn/bn_lcl.h b/crypto/bn/bn_lcl.h
index 1c680fc3ce..64df27aaf9 100644
--- a/crypto/bn/bn_lcl.h
+++ b/crypto/bn/bn_lcl.h
@@ -210,6 +210,24 @@ extern "C" {
 #define BN_MUL_LOW_RECURSIVE_SIZE_NORMAL        (32) /* 32 */
 #define BN_MONT_CTX_SET_SIZE_WORD               (64) /* 32 */
 
+/* 2011-02-22 SMS.
+ * In various places, a size_t variable or a type cast to size_t was
+ * used to perform integer-only operations on pointers.  This failed on
+ * VMS with 64-bit pointers (CC /POINTER_SIZE = 64) because size_t is
+ * still only 32 bits.  What's needed in these cases is an integer type
+ * with the same size as a pointer, which size_t is not certain to be.
+ * The only fix here is VMS-specific.
+ */
+#if defined(OPENSSL_SYS_VMS)
+# if __INITIAL_POINTER_SIZE == 64
+#  define PTR_SIZE_INT long long
+# else /* __INITIAL_POINTER_SIZE == 64 */
+#  define PTR_SIZE_INT int
+# endif /* __INITIAL_POINTER_SIZE == 64 [else] */
+#elif !defined(PTR_SIZE_INT) /* defined(OPENSSL_SYS_VMS) */
+# define PTR_SIZE_INT size_t
+#endif /* defined(OPENSSL_SYS_VMS) [else] */
+
 #if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
 /*
  * BN_UMULT_HIGH section.
@@ -238,7 +256,7 @@ extern "C" {
 #  if defined(__DECC)
 #   include <c_asm.h>
 #   define BN_UMULT_HIGH(a,b)   (BN_ULONG)asm("umulh %a0,%a1,%v0",(a),(b))
-#  elif defined(__GNUC__)
+#  elif defined(__GNUC__) && __GNUC__>=2
 #   define BN_UMULT_HIGH(a,b)   ({      \
        register BN_ULONG ret;           \
        asm ("umulh     %1,%2,%0"        \
@@ -247,7 +265,7 @@ extern "C" {
             ret;               })
 #  endif        /* compiler */
 # elif defined(_ARCH_PPC) && defined(__64BIT__) && defined(SIXTY_FOUR_BIT_LONG)
-#  if defined(__GNUC__)
+#  if defined(__GNUC__) && __GNUC__>=2
 #   define BN_UMULT_HIGH(a,b)   ({      \
        register BN_ULONG ret;           \
        asm ("mulhdu    %0,%1,%2"        \
@@ -255,8 +273,9 @@ extern "C" {
             : "r"(a), "r"(b));          \
             ret;               })
 #  endif        /* compiler */
-# elif defined(__x86_64) && defined(SIXTY_FOUR_BIT_LONG)
-#  if defined(__GNUC__)
+# elif (defined(__x86_64) || defined(__x86_64__)) && \
+       (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
+#  if defined(__GNUC__) && __GNUC__>=2
 #   define BN_UMULT_HIGH(a,b)   ({      \
        register BN_ULONG ret,discard;   \
        asm ("mulq      %3"              \
@@ -270,6 +289,35 @@ extern "C" {
             : "a"(a),"g"(b)             \
             : "cc");
 #  endif
+# elif (defined(_M_AMD64) || defined(_M_X64)) && defined(SIXTY_FOUR_BIT)
+#  if defined(_MSC_VER) && _MSC_VER>=1400
+    unsigned __int64 __umulh   (unsigned __int64 a,unsigned __int64 b);
+    unsigned __int64 _umul128  (unsigned __int64 a,unsigned __int64 b,
+                                unsigned __int64 *h);
+#   pragma intrinsic(__umulh,_umul128)
+#   define BN_UMULT_HIGH(a,b)           __umulh((a),(b))
+#   define BN_UMULT_LOHI(low,high,a,b)  ((low)=_umul128((a),(b),&(high)))
+#  endif
+# elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
+#  if defined(__GNUC__) && __GNUC__>=2
+#   if __GNUC__>=4 && __GNUC_MINOR__>=4 /* "h" constraint is no more since 4.4 */
+#    define BN_UMULT_HIGH(a,b)          (((__uint128_t)(a)*(b))>>64)
+#    define BN_UMULT_LOHI(low,high,a,b) ({      \
+       __uint128_t ret=(__uint128_t)(a)*(b);    \
+       (high)=ret>>64; (low)=ret;       })
+#   else
+#    define BN_UMULT_HIGH(a,b)  ({      \
+       register BN_ULONG ret;           \
+       asm ("dmultu    %1,%2"           \
+            : "=h"(ret)                 \
+            : "r"(a), "r"(b) : "l");    \
+            ret;               })
+#    define BN_UMULT_LOHI(low,high,a,b)\
+       asm ("dmultu    %2,%3"           \
+            : "=l"(low),"=h"(high)      \
+            : "r"(a), "r"(b));
+#   endif
+#  endif
 # endif         /* cpu */
 #endif          /* OPENSSL_NO_ASM */
 
@@ -313,6 +361,33 @@ extern "C" {
        (r1)=Hw(t); \
        }
 
+#elif defined(BN_UMULT_LOHI)
+#define mul_add(r,a,w,c) {              \
+       BN_ULONG high,low,ret,tmp=(a);   \
+       ret =  (r);                      \
+       BN_UMULT_LOHI(low,high,w,tmp);   \
+       ret += (c);                      \
+       (c) =  (ret<(c))?1:0;            \
+       (c) += high;                     \
+       ret += low;                      \
+       (c) += (ret