-#define BN_MULL_SIZE_NORMAL (16) // 32
-#define BN_MUL_RECURSIVE_SIZE_NORMAL (16) // 32 /* less than */
-#define BN_SQR_RECURSIVE_SIZE_NORMAL (16) // 32
-#define BN_MUL_LOW_RECURSIVE_SIZE_NORMAL (32) // 32
-#define BN_MONT_CTX_SET_SIZE_WORD (64) // 32
-
-#ifndef BN_MUL_COMBA
-#define bn_mul_comba8(r,a,b) bn_mul_normal(r,a,8,b,8)
-#define bn_mul_comba4(r,a,b) bn_mul_normal(r,a,4,b,4)
-/* This is probably faster than using the C code - I need to check */
-#define bn_sqr_comba8(r,a) bn_mul_normal(r,a,8,a,8)
-#define bn_sqr_comba4(r,a) bn_mul_normal(r,a,4,a,4)
-#endif
+#define BN_MULL_SIZE_NORMAL (16) /* 32 */
+#define BN_MUL_RECURSIVE_SIZE_NORMAL (16) /* 32 less than */
+#define BN_SQR_RECURSIVE_SIZE_NORMAL (16) /* 32 */
+#define BN_MUL_LOW_RECURSIVE_SIZE_NORMAL (32) /* 32 */
+#define BN_MONT_CTX_SET_SIZE_WORD (64) /* 32 */
+
+/* 2011-02-22 SMS.
+ * In various places, a size_t variable or a type cast to size_t was
+ * used to perform integer-only operations on pointers. This failed on
+ * VMS with 64-bit pointers (CC /POINTER_SIZE = 64) because size_t is
+ * still only 32 bits. What's needed in these cases is an integer type
+ * with the same size as a pointer, which size_t is not certain to be.
+ * The only fix here is VMS-specific.
+ */
+#if defined(OPENSSL_SYS_VMS)
+# if __INITIAL_POINTER_SIZE == 64
+# define PTR_SIZE_INT long long
+# else /* __INITIAL_POINTER_SIZE == 64 */
+# define PTR_SIZE_INT int
+# endif /* __INITIAL_POINTER_SIZE == 64 [else] */
+#elif !defined(PTR_SIZE_INT) /* defined(OPENSSL_SYS_VMS) */
+# define PTR_SIZE_INT size_t
+#endif /* defined(OPENSSL_SYS_VMS) [else] */
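A minimal sketch (not part of the patch) of the kind of integer-only pointer arithmetic the comment above refers to; the helper name align16 is invented for illustration. Rounding a scratch pointer up to an alignment boundary through a 32-bit size_t would truncate a 64-bit pointer on the way back, whereas PTR_SIZE_INT is chosen above to be as wide as a pointer:

    #include <stddef.h>

    /* Hypothetical helper: round p up to the next 16-byte boundary.
     * Casting through PTR_SIZE_INT (rather than size_t) keeps all 64
     * address bits on VMS builds with CC /POINTER_SIZE=64. */
    static void *align16(void *p)
    {
        return (void *)(((PTR_SIZE_INT)p + 15) & ~(PTR_SIZE_INT)15);
    }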
+
+#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM) && !defined(PEDANTIC)
+/*
+ * BN_UMULT_HIGH section.
+ *
+ * No, I'm not trying to overwhelm you when stating that the
+ * product of two N-bit numbers is 2*N bits wide:-) No, I don't
+ * expect you to be impressed when I say that if the compiler
+ * doesn't support a 2*N integer type, then every N*N
+ * multiplication has to be replaced with 4 (N/2)*(N/2)
+ * multiplications accompanied by some shifts and additions,
+ * which unavoidably results in severe performance penalties.
+ * (Provided, of course, that the hardware is capable of
+ * producing the 2*N result at all.) That's when you normally
+ * start considering an assembler implementation. However! It
+ * should be pointed out that some CPUs (most notably Alpha,
+ * PowerPC and the upcoming IA-64 family:-) provide a *separate*
+ * instruction that calculates the upper half of the product and
+ * places the result in a general-purpose register. Now *if* the
+ * compiler supports inline assembler, then it's not impossible
+ * to implement the "bignum" routines (and have the compiler
+ * optimize 'em) with "native" performance in C. That's what the
+ * BN_UMULT_HIGH macro is about:-)
+ *
+ * <appro@fy.chalmers.se>
+ */
+# if defined(__alpha) && (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
+# if defined(__DECC)
+# include <c_asm.h>
+# define BN_UMULT_HIGH(a,b) (BN_ULONG)asm("umulh %a0,%a1,%v0",(a),(b))
+# elif defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("umulh %1,%2,%0" \
+ : "=r"(ret) \
+ : "r"(a), "r"(b)); \
+ ret; })
+# endif /* compiler */
+# elif defined(_ARCH_PPC) && defined(__64BIT__) && defined(SIXTY_FOUR_BIT_LONG)
+# if defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("mulhdu %0,%1,%2" \
+ : "=r"(ret) \
+ : "r"(a), "r"(b)); \
+ ret; })
+# endif /* compiler */
+# elif (defined(__x86_64) || defined(__x86_64__)) && \
+ (defined(SIXTY_FOUR_BIT_LONG) || defined(SIXTY_FOUR_BIT))
+# if defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret,discard; \
+ asm ("mulq %3" \
+ : "=a"(discard),"=d"(ret) \
+ : "a"(a), "g"(b) \
+ : "cc"); \
+ ret; })
+# define BN_UMULT_LOHI(low,high,a,b) \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(a),"g"(b) \
+ : "cc");
+# endif
+# elif (defined(_M_AMD64) || defined(_M_X64)) && defined(SIXTY_FOUR_BIT)
+# if defined(_MSC_VER) && _MSC_VER>=1400
+ unsigned __int64 __umulh (unsigned __int64 a,unsigned __int64 b);
+ unsigned __int64 _umul128 (unsigned __int64 a,unsigned __int64 b,
+ unsigned __int64 *h);
+# pragma intrinsic(__umulh,_umul128)
+# define BN_UMULT_HIGH(a,b) __umulh((a),(b))
+# define BN_UMULT_LOHI(low,high,a,b) ((low)=_umul128((a),(b),&(high)))
+# endif
+# elif defined(__mips) && (defined(SIXTY_FOUR_BIT) || defined(SIXTY_FOUR_BIT_LONG))
+# if defined(__GNUC__) && __GNUC__>=2
+# if __GNUC__>4 || (__GNUC__==4 && __GNUC_MINOR__>=4) /* "h" constraint is gone since gcc 4.4 */
+# define BN_UMULT_HIGH(a,b) (((__uint128_t)(a)*(b))>>64)
+# define BN_UMULT_LOHI(low,high,a,b) ({ \
+ __uint128_t ret=(__uint128_t)(a)*(b); \
+ (high)=ret>>64; (low)=ret; })
+# else
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("dmultu %1,%2" \
+ : "=h"(ret) \
+ : "r"(a), "r"(b) : "l"); \
+ ret; })
+# define BN_UMULT_LOHI(low,high,a,b)\
+ asm ("dmultu %2,%3" \
+ : "=l"(low),"=h"(high) \
+ : "r"(a), "r"(b));
+# endif
+# endif
+# elif defined(__aarch64__) && defined(SIXTY_FOUR_BIT_LONG)
+# if defined(__GNUC__) && __GNUC__>=2
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret; \
+ asm ("umulh %0,%1,%2" \
+ : "=r"(ret) \
+ : "r"(a), "r"(b)); \
+ ret; })
+# endif
+# endif /* cpu */
+#endif /* OPENSSL_NO_ASM */
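For reference (not part of the patch), this is the portable fallback that the BN_UMULT_HIGH comment alludes to: without a double-width integer type and without a high-half multiply instruction, the upper word of an N*N product has to be rebuilt from four (N/2)*(N/2) partial products plus shifts and adds. A rough sketch for 64-bit words, using uint64_t in place of BN_ULONG (all names here are illustrative):

    #include <stdint.h>

    /* Upper 64 bits of a*b, computed from four 32x32->64 partial
     * products (schoolbook split: a = aH*2^32 + aL, b = bH*2^32 + bL). */
    static uint64_t umult_high_portable(uint64_t a, uint64_t b)
    {
        uint64_t aL = a & 0xffffffffU, aH = a >> 32;
        uint64_t bL = b & 0xffffffffU, bH = b >> 32;
        uint64_t ll   = aL * bL;
        uint64_t mid1 = aH * bL + (ll >> 32);            /* cannot overflow */
        uint64_t mid2 = aL * bH + (mid1 & 0xffffffffU);  /* cannot overflow */
        return aH * bH + (mid1 >> 32) + (mid2 >> 32);
    }

Where one of the BN_UMULT_HIGH definitions above applies, the same high word comes back in a single instruction, e.g. low = a * b; high = BN_UMULT_HIGH(a, b); which is roughly how OpenSSL's word-level mul/mul_add macros use it when BN_UMULT_HIGH is available.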