3-4 times better RSA/DSA performance on WIN64A target. Well, on AMD64 CPU,
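
For reference, here is a minimal standalone sketch of the double-word
multiply-accumulate that the mul_add_c macro below implements. It is
illustrative only: unsigned __int128 (a GCC/Clang extension) stands in
for the BN_UMULT_LOHI primitive the patch targets, and the mul_add name
and the test values are hypothetical, not part of the patch.

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t BN_ULONG;

/* (c2,c1,c0) += a*b, mirroring mul_add_c: split the 128-bit product
 * into (lo,hi), then ripple the carries upward.  An unsigned sum
 * wraps iff the result is smaller than an addend, so each compare
 * recovers exactly the carry bit. */
static void mul_add(BN_ULONG *c0, BN_ULONG *c1, BN_ULONG *c2,
                    BN_ULONG a, BN_ULONG b)
{
    unsigned __int128 p = (unsigned __int128)a * b;
    BN_ULONG lo = (BN_ULONG)p, hi = (BN_ULONG)(p >> 64);

    *c0 += lo;
    hi  += (*c0 < lo) ? 1 : 0;  /* hi <= 2^64-2, so this cannot wrap */
    *c1 += hi;
    *c2 += (*c1 < hi) ? 1 : 0;
}

int main(void)
{
    BN_ULONG c0 = ~(BN_ULONG)0, c1 = ~(BN_ULONG)0, c2 = 0;
    mul_add(&c0, &c1, &c2, ~(BN_ULONG)0, ~(BN_ULONG)0);
    /* prints 0000000000000001 fffffffffffffffe 0000000000000000 */
    printf("%016" PRIx64 " %016" PRIx64 " %016" PRIx64 "\n", c2, c1, c0);
    return 0;
}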
diff --git a/crypto/bn/bn_asm.c b/crypto/bn/bn_asm.c
index 19978085b26a5506e4a61ba2850b2cc7be31f6c8..99bc2de4913e8b88c21c3f610f2e4bdae71bfce5 100644
--- a/crypto/bn/bn_asm.c
+++ b/crypto/bn/bn_asm.c
@@ -459,6 +459,34 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
 #define sqr_add_c2(a,i,j,c0,c1,c2) \
        mul_add_c2((a)[i],(a)[j],c0,c1,c2)
 
+#elif defined(BN_UMULT_LOHI)
+
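+/*
+ * (c2,c1,c0) += a*b.  BN_UMULT_LOHI stores the double-word product
+ * in (t1,t2) = (low,high); each (x<y) compare recovers a carry bit,
+ * since an unsigned sum wraps iff it is smaller than an addend.
+ */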
+#define mul_add_c(a,b,c0,c1,c2)        {       \
+       BN_ULONG ta=(a),tb=(b);         \
+       BN_UMULT_LOHI(t1,t2,ta,tb);     \
+       c0 += t1; t2 += (c0<t1)?1:0;    \
+       c1 += t2; c2 += (c1<t2)?1:0;    \
+       }
+
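+/*
+ * (c2,c1,c0) += 2*a*b, for the cross terms of squaring: the product
+ * is doubled word by word, collecting the shifted-out carries.
+ */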
+#define mul_add_c2(a,b,c0,c1,c2) {     \
+       BN_ULONG ta=(a),tb=(b),t0;      \
+       BN_UMULT_LOHI(t0,t1,ta,tb);     \
+       t2 = t1+t1; c2 += (t2<t1)?1:0;  \
+       t1 = t0+t0; t2 += (t1<t0)?1:0;  \
+       c0 += t1; t2 += (c0<t1)?1:0;    \
+       c1 += t2; c2 += (c1<t2)?1:0;    \
+       }
+
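+/* (c2,c1,c0) += a[i]^2, the diagonal term of the comba squaring. */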
+#define sqr_add_c(a,i,c0,c1,c2)        {       \
+       BN_ULONG ta=(a)[i];             \
+       BN_UMULT_LOHI(t1,t2,ta,ta);     \
+       c0 += t1; t2 += (c0<t1)?1:0;    \
+       c1 += t2; c2 += (c1<t2)?1:0;    \
+       }
+
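+/* a[i]*a[j] with i!=j appears twice in a square, hence mul_add_c2. */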
+#define sqr_add_c2(a,i,j,c0,c1,c2)     \
+       mul_add_c2((a)[i],(a)[j],c0,c1,c2)
+
 #elif defined(BN_UMULT_HIGH)
 
 #define mul_add_c(a,b,c0,c1,c2)        {       \