bn/bn_exp.c: Solaris-specific fix: T4 MONTMUL relies on alloca.
diff --git a/crypto/bn/bn_exp.c b/crypto/bn/bn_exp.c
index 5c492365f3172b1faa94a1473e0781f3dbc61667..adc478caa639d18842d4a85afc57c4796c7e3b82 100644
--- a/crypto/bn/bn_exp.c
+++ b/crypto/bn/bn_exp.c
 # ifndef alloca
 #  define alloca(s) __builtin_alloca((s))
 # endif
+#elif defined(__sun)
+# include <alloca.h>
+#endif
+
+#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
+# include "sparc_arch.h"
+extern unsigned int OPENSSL_sparcv9cap_P[];
 #endif
 
 /* maximum precomputation table size for *variable* sliding windows */
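
As the subject line notes, the T4 code added further down keeps a temporary, flipped copy of the modulus on the stack via alloca() and wipes it with OPENSSL_cleanse() before leaving the function; on Solaris that requires including <alloca.h> explicitly, since alloca is not arranged as a builtin macro the way the __GNUC__ branch above does it. A minimal sketch of that allocate/use/wipe pattern (illustrative only, simplified guard, compiles as a standalone translation unit):

    /* Sketch: stack scratch for sensitive data, wiped before it goes out
     * of scope.  Assumes <openssl/crypto.h> for OPENSSL_cleanse() and that
     * n is small enough for the stack. */
    #include <string.h>
    #include <openssl/crypto.h>
    #if defined(__GNUC__)
    # ifndef alloca
    #  define alloca(s) __builtin_alloca((s))
    # endif
    #elif defined(__sun)
    # include <alloca.h>
    #endif

    void with_scratch(const unsigned char *secret, size_t n)
    {
        unsigned char *tmp = alloca(n);     /* no free() needed */

        memcpy(tmp, secret, n);
        /* ... work on the temporary copy ... */
        OPENSSL_cleanse(tmp, n);            /* wipe before returning */
    }
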
@@ -468,7 +475,15 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
        wstart=bits-1;  /* The top bit of the window */
        wend=0;         /* The bottom bit of the window */
 
+#if 1  /* by Shay Gueron's suggestion */
+       j = mont->N.top;        /* borrow j */
+       if (bn_wexpand(r,j) == NULL) goto err;
+       r->d[0] = (0-m->d[0])&BN_MASK2;         /* 2^(top*BN_BITS2) - m */
+       for(i=1;i<j;i++) r->d[i] = (~m->d[i])&BN_MASK2;
+       r->top = j;
+#else
        if (!BN_to_montgomery(r,BN_value_one(),mont,ctx)) goto err;
+#endif
        for (;;)
                {
                if (BN_is_bit_set(p,wstart) == 0)
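
The new branch above seeds r with 2^(top*BN_BITS2) - m instead of calling BN_to_montgomery(): because m is odd, negating the low word and complementing every higher word is exactly the limb-wise value R - m with R = 2^(top*BN_BITS2), and R - m is congruent to R, i.e. to 1 in Montgomery representation. A single-limb illustration (standalone C, not OpenSSL code; a 32-bit limb stands in for BN_ULONG):

    /* For an odd modulus m, the two's complement of m is exactly R - m with
     * R = 2^32, and R - m is congruent to R mod m, i.e. to 1 in Montgomery
     * form. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t m = 0xfffffffbU;       /* odd modulus */
        uint32_t one_mont = 0U - m;     /* wraps mod 2^32; same idea as (0-m->d[0])&BN_MASK2 */
        uint64_t R = (uint64_t)1 << 32;

        printf("R - m         = %u\n", (unsigned)(R - m));
        printf("two's compl.  = %u\n", (unsigned)one_mont);
        printf("R mod m       = %u\n", (unsigned)(R % m));
        return 0;
    }

All three lines print the same value, which is 1*R mod m.
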
@@ -520,6 +535,17 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
                start=0;
                if (wstart < 0) break;
                }
+#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
+       if (OPENSSL_sparcv9cap_P[0]&(SPARCV9_VIS3|SPARCV9_PREFER_FPU))
+               {
+               j = mont->N.top;        /* borrow j */
+               val[0]->d[0] = 1;       /* borrow val[0] */
+               for (i=1;i<j;i++) val[0]->d[i] = 0;
+               val[0]->top = j;
+               if (!BN_mod_mul_montgomery(rr,r,val[0],mont,ctx)) goto err;
+               }
+       else
+#endif
        if (!BN_from_montgomery(rr,r,mont,ctx)) goto err;
        ret=1;
 err:
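
On CPUs with VIS3 or a preferred FPU, the conversion back out of Montgomery form is done by borrowing val[0] as a plain 1 and performing one more Montgomery multiplication: mont_mul(r, 1) = r * 1 * R^-1 mod m, which is the same value BN_from_montgomery() produces, so routing it through BN_mod_mul_montgomery() presumably just keeps the final reduction on the accelerated multiply path. A small check of that identity through the public BN API (error handling and frees omitted; link with -lcrypto):

    #include <stdio.h>
    #include <openssl/bn.h>

    int main(void)
    {
        BN_CTX *ctx = BN_CTX_new();
        BN_MONT_CTX *mont = BN_MONT_CTX_new();
        BIGNUM *m = BN_new(), *x = BN_new(), *one = BN_new();
        BIGNUM *a = BN_new(), *b = BN_new();

        BN_generate_prime_ex(m, 256, 0, NULL, NULL, NULL);  /* odd modulus */
        BN_MONT_CTX_set(mont, m, ctx);
        BN_rand_range(x, m);
        BN_one(one);

        BN_from_montgomery(a, x, mont, ctx);           /* x * R^-1 mod m     */
        BN_mod_mul_montgomery(b, x, one, mont, ctx);   /* x * 1 * R^-1 mod m */
        printf("equal: %s\n", BN_cmp(a, b) == 0 ? "yes" : "no");
        return 0;
    }
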
@@ -529,6 +555,28 @@ err:
        return(ret);
        }
 
+#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
+static BN_ULONG bn_get_bits(const BIGNUM *a, int bitpos)
+       {
+       BN_ULONG ret=0;
+       int wordpos;
+
+       wordpos = bitpos/BN_BITS2;
+       bitpos %= BN_BITS2;
+       if (wordpos>=0 && wordpos < a->top)
+               {
+               ret = a->d[wordpos]&BN_MASK2;
+               if (bitpos)
+                       {
+                       ret >>= bitpos;
+                       if (++wordpos < a->top)
+                               ret |= a->d[wordpos]<<(BN_BITS2-bitpos);
+                       }
+               }
+
+       return ret&BN_MASK2;
+       }
+#endif
 
 /* BN_mod_exp_mont_consttime() stores the precomputed powers in a specific layout
  * so that accessing any of these table values shows the same access pattern as far
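
bn_get_bits() returns the BN_BITS2 exponent bits starting at an arbitrary bit position, stitching them together from two adjacent words when the window straddles a word boundary; the T4 loop further down uses it to fetch a whole stride-sized chunk of the exponent per iteration. A standalone sketch of the same extraction with 64-bit limbs (get_bits and LIMBS are illustrative names, not OpenSSL's):

    #include <stdint.h>
    #include <stdio.h>

    #define LIMBS 2                 /* little-endian limb array, like BIGNUM->d */

    static uint64_t get_bits(const uint64_t d[LIMBS], int bitpos)
    {
        int word = bitpos / 64, bit = bitpos % 64;
        uint64_t ret = 0;

        if (word >= 0 && word < LIMBS) {
            ret = d[word];
            if (bit) {
                ret >>= bit;                          /* low part from d[word]    */
                if (word + 1 < LIMBS)
                    ret |= d[word + 1] << (64 - bit); /* high part from d[word+1] */
            }
        }
        return ret;
    }

    int main(void)
    {
        uint64_t d[LIMBS] = { 0, ~(uint64_t)0 };      /* bits 64..127 set */

        /* a window starting at bit 60: four zero bits, then ones */
        printf("%016llx\n", (unsigned long long)get_bits(d, 60));  /* fffffffffffffff0 */
        return 0;
    }
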
@@ -588,6 +636,9 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
        int powerbufLen = 0;
        unsigned char *powerbuf=NULL;
        BIGNUM tmp, am;
+#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
+       unsigned int t4=0;
+#endif
 
        bn_check_top(a);
        bn_check_top(p);
@@ -622,9 +673,18 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
 
        /* Get the window size to use with size of p. */
        window = BN_window_bits_for_ctime_exponent_size(bits);
+#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
+       if (window>=5 && (top&15)==0 && top<=64 &&
+           (OPENSSL_sparcv9cap_P[1]&(CFR_MONTMUL|CFR_MONTSQR))==
+                                    (CFR_MONTMUL|CFR_MONTSQR) &&
+           (t4=OPENSSL_sparcv9cap_P[0]))
+               window=5;
+       else
+#endif
 #if defined(OPENSSL_BN_ASM_MONT5)
        if (window==6 && bits<=1024) window=5;  /* ~5% improvement of 2048-bit RSA sign */
 #endif
+       (void)0;
 
        /* Allocate a buffer large enough to hold all of the pre-computed
         * powers of am, am itself and tmp.
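
The added gate takes the T4 branch only when a 5-bit or larger window would be chosen anyway, the modulus occupies a multiple of 16 BN_ULONGs (at most 64 of them), and the chip reports both CFR_MONTMUL and CFR_MONTSQR; with the 32-bit BN_ULONG configuration this code targets (note the later "switch to 64-bit domain" step that halves top), that corresponds exactly to the 512-, 1024-, 1536- and 2048-bit moduli for which dedicated *_t4_{8,16,24,32} routines are declared further down, and the window is pinned to the 5 bits those routines implement. A small sketch of the top/16 - 1 selection (illustrative, assumes 32-bit limbs):

    #include <stdio.h>

    int main(void)
    {
        static const char *pwr5[4] = {      /* same names as in the diff */
            "bn_pwr5_mont_t4_8",  "bn_pwr5_mont_t4_16",
            "bn_pwr5_mont_t4_24", "bn_pwr5_mont_t4_32",
        };
        int bits;

        for (bits = 512; bits <= 2048; bits += 512) {
            int top = bits / 32;            /* 32-bit BN_ULONGs per modulus */
            printf("%4d-bit modulus: top=%2d -> %s\n", bits, top, pwr5[top/16 - 1]);
        }
        return 0;
    }
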
@@ -657,13 +717,13 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
        tmp.flags = am.flags = BN_FLG_STATIC_DATA;
 
        /* prepare a^0 in Montgomery domain */
-#if 1
-       if (!BN_to_montgomery(&tmp,BN_value_one(),mont,ctx))    goto err;
-#else
+#if 1  /* by Shay Gueron's suggestion */
        tmp.d[0] = (0-m->d[0])&BN_MASK2;        /* 2^(top*BN_BITS2) - m */
        for (i=1;i<top;i++)
                tmp.d[i] = (~m->d[i])&BN_MASK2;
        tmp.top = top;
+#else
+       if (!BN_to_montgomery(&tmp,BN_value_one(),mont,ctx))    goto err;
 #endif
 
        /* prepare a^1 in Montgomery domain */
@@ -674,6 +734,121 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
                }
        else    if (!BN_to_montgomery(&am,a,mont,ctx))          goto err;
 
+#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
+    if (t4)
+       {
+       typedef int (*bn_pwr5_mont_f)(BN_ULONG *tp,const BN_ULONG *np,
+                       const BN_ULONG *n0,const void *table,int power,int bits);
+       int bn_pwr5_mont_t4_8(BN_ULONG *tp,const BN_ULONG *np,
+                       const BN_ULONG *n0,const void *table,int power,int bits);
+       int bn_pwr5_mont_t4_16(BN_ULONG *tp,const BN_ULONG *np,
+                       const BN_ULONG *n0,const void *table,int power,int bits);
+       int bn_pwr5_mont_t4_24(BN_ULONG *tp,const BN_ULONG *np,
+                       const BN_ULONG *n0,const void *table,int power,int bits);
+       int bn_pwr5_mont_t4_32(BN_ULONG *tp,const BN_ULONG *np,
+                       const BN_ULONG *n0,const void *table,int power,int bits);
+       static const bn_pwr5_mont_f pwr5_funcs[4] = {
+                       bn_pwr5_mont_t4_8,      bn_pwr5_mont_t4_16,
+                       bn_pwr5_mont_t4_24,     bn_pwr5_mont_t4_32 };
+       bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top/16-1];
+
+       typedef int (*bn_mul_mont_f)(BN_ULONG *rp,const BN_ULONG *ap,
+                       const void *bp,const BN_ULONG *np,const BN_ULONG *n0);
+       int bn_mul_mont_t4_8(BN_ULONG *rp,const BN_ULONG *ap,
+                       const void *bp,const BN_ULONG *np,const BN_ULONG *n0);
+       int bn_mul_mont_t4_16(BN_ULONG *rp,const BN_ULONG *ap,
+                       const void *bp,const BN_ULONG *np,const BN_ULONG *n0);
+       int bn_mul_mont_t4_24(BN_ULONG *rp,const BN_ULONG *ap,
+                       const void *bp,const BN_ULONG *np,const BN_ULONG *n0);
+       int bn_mul_mont_t4_32(BN_ULONG *rp,const BN_ULONG *ap,
+                       const void *bp,const BN_ULONG *np,const BN_ULONG *n0);
+       static const bn_mul_mont_f mul_funcs[4] = {
+                       bn_mul_mont_t4_8,       bn_mul_mont_t4_16,
+                       bn_mul_mont_t4_24,      bn_mul_mont_t4_32 };
+       bn_mul_mont_f mul_worker = mul_funcs[top/16-1];
+
+       void bn_mul_mont_vis3(BN_ULONG *rp,const BN_ULONG *ap,
+                       const void *bp,const BN_ULONG *np,
+                       const BN_ULONG *n0,int num);
+       void bn_mul_mont_t4(BN_ULONG *rp,const BN_ULONG *ap,
+                       const void *bp,const BN_ULONG *np,
+                       const BN_ULONG *n0,int num);
+       void bn_mul_mont_gather5_t4(BN_ULONG *rp,const BN_ULONG *ap,
+                       const void *table,const BN_ULONG *np,
+                       const BN_ULONG *n0,int num,int power);
+       void bn_flip_n_scatter5_t4(const BN_ULONG *inp,size_t num,
+                       void *table,size_t power);
+       void bn_gather5_t4(BN_ULONG *out,size_t num,
+                       void *table,size_t power);
+       void bn_flip_t4(BN_ULONG *dst,BN_ULONG *src,size_t num);
+
+       BN_ULONG *np=mont->N.d, *n0=mont->n0;
+       int stride = 5*(6-(top/16-1));  /* multiple of 5, but less than 32 */
+
+       /* BN_to_montgomery can contaminate words above .top
+        * [in BN_DEBUG[_DEBUG] build]... */
+       for (i=am.top; i<top; i++)      am.d[i]=0;
+       for (i=tmp.top; i<top; i++)     tmp.d[i]=0;
+
+       bn_flip_n_scatter5_t4(tmp.d,top,powerbuf,0);
+       bn_flip_n_scatter5_t4(am.d,top,powerbuf,1);
+       if (!(*mul_worker)(tmp.d,am.d,am.d,np,n0) &&
+           !(*mul_worker)(tmp.d,am.d,am.d,np,n0))
+               bn_mul_mont_vis3(tmp.d,am.d,am.d,np,n0,top);
+       bn_flip_n_scatter5_t4(tmp.d,top,powerbuf,2);
+
+       for (i=3; i<32; i++)
+               {
+               /* Calculate a^i = a^(i-1) * a */
+               if (!(*mul_worker)(tmp.d,tmp.d,am.d,np,n0) &&
+                   !(*mul_worker)(tmp.d,tmp.d,am.d,np,n0))
+                       bn_mul_mont_vis3(tmp.d,tmp.d,am.d,np,n0,top);
+               bn_flip_n_scatter5_t4(tmp.d,top,powerbuf,i);
+               }
+
+       /* switch to 64-bit domain */ 
+       np = alloca(top*sizeof(BN_ULONG));
+       top /= 2;
+       bn_flip_t4(np,mont->N.d,top);
+
+       bits--;
+       for (wvalue=0, i=bits%5; i>=0; i--,bits--)
+               wvalue = (wvalue<<1)+BN_is_bit_set(p,bits);
+       bn_gather5_t4(tmp.d,top,powerbuf,wvalue);
+
+       /* Scan the exponent one window at a time starting from the most
+        * significant bits.
+        */
+       while (bits >= 0)
+               {
+               if (bits < stride) stride = bits+1;
+               bits -= stride;
+               wvalue = bn_get_bits(p,bits+1);
+
+               if ((*pwr5_worker)(tmp.d,np,n0,powerbuf,wvalue,stride)) continue;
+               /* retry once and fall back */
+               if ((*pwr5_worker)(tmp.d,np,n0,powerbuf,wvalue,stride)) continue;
+
+               bits += stride-5;
+               wvalue >>= stride-5;
+               wvalue &= 31;
+               bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top);
+               bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top);
+               bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top);
+               bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top);
+               bn_mul_mont_t4(tmp.d,tmp.d,tmp.d,np,n0,top);
+               bn_mul_mont_gather5_t4(tmp.d,tmp.d,powerbuf,np,n0,top,wvalue);
+               }
+
+       bn_flip_t4(tmp.d,tmp.d,top);
+       top *= 2;
+       /* back to 32-bit domain */
+       tmp.top=top;
+       bn_correct_top(&tmp);
+       OPENSSL_cleanse(np,top*sizeof(BN_ULONG));
+       }
+    else
+#endif
 #if defined(OPENSSL_BN_ASM_MONT5)
     /* This optimization uses ideas from http://eprint.iacr.org/2011/239,
      * specifically optimization of cache-timing attack countermeasures
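
The `if (t4)` block above is the T4 flavour of the fixed-window exponentiation: the powers a^0..a^31 are built in Montgomery form with the size-specific mul_worker (bn_mul_mont_vis3 is the fallback when it declines twice) and scattered into powerbuf, the operands are flipped into the 64-bit word order the T4 instructions expect, and the exponent is then scanned from the top in stride-bit chunks that pwr5_worker handles entirely in assembly. If that worker declines twice, the code falls back to five bn_mul_mont_t4 squarings plus one gathered multiplication covering only the top five bits of the chunk, and reprocesses the remaining bits on the next pass. Stripped of the Montgomery arithmetic, the scatter/gather table layout and any constant-time concerns, the underlying window schedule looks like this standalone sketch (illustrative names, plain modular arithmetic):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mul_mod(uint32_t a, uint32_t b, uint32_t m)
    {
        return (uint32_t)(((uint64_t)a * b) % m);
    }

    /* Left-to-right fixed 5-bit-window exponentiation: precompute a^0..a^31,
     * consume the leading (bits%5)+1 bits, then repeat "5 squarings + one
     * table multiply" until the exponent is exhausted. */
    static uint32_t exp_mod_win5(uint32_t a, uint32_t e, uint32_t m)
    {
        uint32_t table[32], r;
        int i, bits, wvalue;

        if (e == 0)
            return 1 % m;

        table[0] = 1 % m;
        for (i = 1; i < 32; i++)
            table[i] = mul_mod(table[i - 1], a, m);   /* a^i mod m */

        for (bits = 31; !((e >> bits) & 1); bits--)
            ;                                         /* index of top set bit */

        for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--)
            wvalue = (wvalue << 1) + ((e >> bits) & 1);
        r = table[wvalue];                            /* leading partial window */

        while (bits >= 0) {
            bits -= 5;
            wvalue = (e >> (bits + 1)) & 31;          /* next 5 exponent bits */
            for (i = 0; i < 5; i++)
                r = mul_mod(r, r, m);                 /* 5 squarings */
            r = mul_mod(r, table[wvalue], m);         /* multiply by a^wvalue */
        }
        return r;
    }

    int main(void)
    {
        uint32_t m = 1000003, a = 12345, e = 987654, check = 1, i;

        for (i = 0; i < e; i++)
            check = mul_mod(check, a, m);             /* slow reference */
        printf("windowed=%u reference=%u\n", exp_mod_win5(a, e, m), check);
        return 0;
    }
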
@@ -693,6 +868,11 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
 
        BN_ULONG *np=mont->N.d, *n0=mont->n0;
 
+       /* BN_to_montgomery can contaminate words above .top
+        * [in BN_DEBUG[_DEBUG] build]... */
+       for (i=am.top; i<top; i++)      am.d[i]=0;
+       for (i=tmp.top; i<top; i++)     tmp.d[i]=0;
+
        bn_scatter5(tmp.d,top,powerbuf,0);
        bn_scatter5(am.d,am.top,powerbuf,1);
        bn_mul_mont(tmp.d,am.d,am.d,np,n0,top);
@@ -812,6 +992,15 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p,
        }
 
        /* Convert the final result from montgomery to standard format */
+#if defined(OPENSSL_BN_ASM_MONT) && (defined(__sparc__) || defined(__sparc))
+       if (OPENSSL_sparcv9cap_P[0]&(SPARCV9_VIS3|SPARCV9_PREFER_FPU))
+               {
+               am.d[0] = 1;    /* borrow am */
+               for (i=1;i<top;i++) am.d[i] = 0;
+               if (!BN_mod_mul_montgomery(rr,&tmp,&am,mont,ctx)) goto err;
+               }
+       else
+#endif
        if (!BN_from_montgomery(rr,&tmp,mont,ctx)) goto err;
        ret=1;
 err:
@@ -870,7 +1059,14 @@ int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p,
        bits = BN_num_bits(p);
        if (bits == 0)
                {
-               ret = BN_one(rr);
+               /* x**0 mod 1 is still zero. */
+               if (BN_is_one(m))
+                       {
+                       ret = 1;
+                       BN_zero(rr);
+                       }
+               else
+                       ret = BN_one(rr);
                return ret;
                }
        if (a == 0)
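
The last hunk fixes the p == 0 early return: a^0 is 1, but modulo 1 even that reduces to 0, so with m == 1 the function now returns 0 rather than 1. A quick check of the corrected behaviour through the public API (error handling omitted; link with -lcrypto):

    #include <stdio.h>
    #include <openssl/bn.h>
    #include <openssl/crypto.h>          /* OPENSSL_free */

    int main(void)
    {
        BN_CTX *ctx = BN_CTX_new();
        BIGNUM *r = BN_new(), *p = BN_new(), *m = BN_new();
        char *s;

        BN_zero(p);                          /* exponent 0 */
        BN_set_word(m, 1);                   /* modulus 1  */

        BN_mod_exp_mont_word(r, 42, p, m, ctx, NULL);
        s = BN_bn2dec(r);
        printf("42**0 mod 1 = %s\n", s);     /* prints 0 with this fix */

        OPENSSL_free(s);
        BN_free(r); BN_free(p); BN_free(m); BN_CTX_free(ctx);
        return 0;
    }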