X-Git-Url: https://git.openssl.org/?p=openssl.git;a=blobdiff_plain;f=crypto%2Fmodes%2Fgcm128.c;h=025c7f889750914ff97385368a00fba5740c4c5d;hp=3d42c6d1ad3c834c37747f542824244bfb7134f5;hb=23a05fa0c146b99731b0e83f3928066536c4b1e0;hpb=2262beef2ee29c79ee39bd7c84b7b8cbb627dfa9

diff --git a/crypto/modes/gcm128.c b/crypto/modes/gcm128.c
index 3d42c6d1ad..025c7f8897 100644
--- a/crypto/modes/gcm128.c
+++ b/crypto/modes/gcm128.c
@@ -47,7 +47,10 @@
  * ====================================================================
  */

-#include "modes.h"
+#define OPENSSL_FIPSAPI
+
+#include <openssl/crypto.h>
+#include "modes_lcl.h"
 #include <string.h>

 #ifndef MODES_DEBUG
@@ -57,80 +60,64 @@
 #endif
 #include <assert.h>

-#if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
-typedef __int64 i64;
-typedef unsigned __int64 u64;
-#define U64(C) C##UI64
-#elif defined(__arch64__)
-typedef long i64;
-typedef unsigned long u64;
-#define U64(C) C##UL
-#else
-typedef long long i64;
-typedef unsigned long long u64;
-#define U64(C) C##ULL
-#endif
-
-typedef unsigned int u32;
-typedef unsigned char u8;
-typedef struct { u64 hi,lo; } u128;
-
-#define STRICT_ALIGNMENT
-#if defined(__i386) || defined(__i386__) || \
-    defined(__x86_64) || defined(__x86_64__) || \
-    defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
-    defined(__s390__) || defined(__s390x__)
-# undef STRICT_ALIGNMENT
-#endif
-
-#if defined(__GNUC__) && __GNUC__>=2
-# if defined(__x86_64) || defined(__x86_64__)
-#  define BSWAP8(x) ({ u64 ret=(x); \
-			asm volatile ("bswapq %0" \
-			: "+r"(ret)); ret; })
-#  define BSWAP4(x) ({ u32 ret=(x); \
-			asm volatile ("bswapl %0" \
-			: "+r"(ret)); ret; })
-# elif defined(__i386) || defined(__i386__)
-#  define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
-			asm volatile ("bswapl %0; bswapl %1" \
-			: "+r"(hi),"+r"(lo)); \
-			(u64)hi<<32|lo; })
-#  define BSWAP4(x) ({ u32 ret=(x); \
-			asm volatile ("bswapl %0" \
-			: "+r"(ret)); ret; })
-# endif
-#elif defined(_MSC_VER)
-# if _MSC_VER>=1300
-#  pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
-#  define BSWAP8(x)	_byteswap_uint64((u64)(x))
-#  define BSWAP4(x)	_byteswap_ulong((u32)(x))
-# elif defined(_M_IX86)
-# endif
+#if defined(BSWAP4) && defined(STRICT_ALIGNMENT)
+/* redefine, because alignment is ensured */
+#undef	GETU32
+#define	GETU32(p)	BSWAP4(*(const u32 *)(p))
+#undef	PUTU32
+#define	PUTU32(p,v)	*(u32 *)(p) = BSWAP4(v)
 #endif

-#ifdef BSWAP4
-#define GETU32(p)	BSWAP4(*(const u32 *)(p))
-#define PUTU32(p,v)	*(u32 *)(p) = BSWAP4(v)
-#else
-#define GETU32(p)	((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
-#define PUTU32(p,v)	((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
-#endif
+#define	PACK(s)		((size_t)(s)<<(sizeof(size_t)*8-16))
+#define REDUCE1BIT(V)	do { \
+	if (sizeof(size_t)==8) { \
+		u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); \
+		V.lo  = (V.hi<<63)|(V.lo>>1); \
+		V.hi  = (V.hi>>1 )^T; \
+	} \
+	else { \
+		u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); \
+		V.lo  = (V.hi<<63)|(V.lo>>1); \
+		V.hi  = (V.hi>>1 )^((u64)T<<32); \
+	} \
+} while(0)

-#define	PACK(s)		((size_t)(s)<<(sizeof(size_t)*8-16))
-
-#if 0
 /*
- * Under ideal conditions 8-bit version should be twice as fast as
- * 4-bit one. But world is far from ideal. For gcc-generated x86 code,
- * 8-bit was observed to run "only" ~50% faster.
On x86_64 observed - * improvement was ~75%, much closer to optimal, but the fact of - * deviation means that references to pre-computed tables end up on - * critical path and as tables are pretty big, 4KB per key+1KB shared, - * execution time is sensitive to cache trashing. It's not actually - * proven, but 4-bit procedure is believed to provide adequate - * all-round performance... - */ + * Even though permitted values for TABLE_BITS are 8, 4 and 1, it should + * never be set to 8. 8 is effectively reserved for testing purposes. + * TABLE_BITS>1 are lookup-table-driven implementations referred to as + * "Shoup's" in GCM specification. In other words OpenSSL does not cover + * whole spectrum of possible table driven implementations. Why? In + * non-"Shoup's" case memory access pattern is segmented in such manner, + * that it's trivial to see that cache timing information can reveal + * fair portion of intermediate hash value. Given that ciphertext is + * always available to attacker, it's possible for him to attempt to + * deduce secret parameter H and if successful, tamper with messages + * [which is nothing but trivial in CTR mode]. In "Shoup's" case it's + * not as trivial, but there is no reason to believe that it's resistant + * to cache-timing attack. And the thing about "8-bit" implementation is + * that it consumes 16 (sixteen) times more memory, 4KB per individual + * key + 1KB shared. Well, on pros side it should be twice as fast as + * "4-bit" version. And for gcc-generated x86[_64] code, "8-bit" version + * was observed to run ~75% faster, closer to 100% for commercial + * compilers... Yet "4-bit" procedure is preferred, because it's + * believed to provide better security-performance balance and adequate + * all-round performance. "All-round" refers to things like: + * + * - shorter setup time effectively improves overall timing for + * handling short messages; + * - larger table allocation can become unbearable because of VM + * subsystem penalties (for example on Windows large enough free + * results in VM working set trimming, meaning that consequent + * malloc would immediately incur working set expansion); + * - larger table has larger cache footprint, which can affect + * performance of other code paths (not necessarily even from same + * thread in Hyper-Threading world); + * + * Value of 1 is not appropriate for performance reasons. 
+ */ +#if TABLE_BITS==8 + static void gcm_init_8bit(u128 Htable[256], u64 H[2]) { int i, j; @@ -142,16 +129,7 @@ static void gcm_init_8bit(u128 Htable[256], u64 H[2]) V.lo = H[1]; for (Htable[128]=V, i=64; i>0; i>>=1) { - if (sizeof(size_t)==8) { - u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); - V.lo = (V.hi<<63)|(V.lo>>1); - V.hi = (V.hi>>1 )^T; - } - else { - u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); - V.lo = (V.hi<<63)|(V.lo>>1); - V.hi = (V.hi>>1) ^((u64)T<<32); - } + REDUCE1BIT(V); Htable[i] = V; } @@ -164,12 +142,13 @@ static void gcm_init_8bit(u128 Htable[256], u64 H[2]) } } -static void gcm_gmult_8bit(u64 Xi[2], u128 Htable[256]) +static void gcm_gmult_8bit(u64 Xi[2], const u128 Htable[256]) { u128 Z = { 0, 0}; const u8 *xi = (const u8 *)Xi+15; size_t rem, n = *xi; const union { long one; char little; } is_endian = {1}; + __fips_constseg static const size_t rem_8bit[256] = { PACK(0x0000), PACK(0x01C2), PACK(0x0384), PACK(0x0246), PACK(0x0708), PACK(0x06CA), PACK(0x048C), PACK(0x054E), @@ -271,36 +250,28 @@ static void gcm_gmult_8bit(u64 Xi[2], u128 Htable[256]) Xi[1] = Z.lo; } } -#endif +#define GCM_MUL(ctx,Xi) gcm_gmult_8bit(ctx->Xi.u,ctx->Htable) -#define _4BIT 1 /* change to 0 to switch to 1-bit multiplication */ +#elif TABLE_BITS==4 -#if _4BIT static void gcm_init_4bit(u128 Htable[16], u64 H[2]) { - int i; u128 V; +#if defined(OPENSSL_SMALL_FOOTPRINT) + int i; +#endif Htable[0].hi = 0; Htable[0].lo = 0; V.hi = H[0]; V.lo = H[1]; +#if defined(OPENSSL_SMALL_FOOTPRINT) for (Htable[8]=V, i=4; i>0; i>>=1) { - if (sizeof(size_t)==8) { - u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); - V.lo = (V.hi<<63)|(V.lo>>1); - V.hi = (V.hi>>1 )^T; - } - else { - u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); - V.lo = (V.hi<<63)|(V.lo>>1); - V.hi = (V.hi>>1 )^((u64)T<<32); - } + REDUCE1BIT(V); Htable[i] = V; } -#if defined(OPENSSL_SMALL_FOOTPRINT) for (i=2; i<16; i<<=1) { u128 *Hi = Htable+i; int j; @@ -310,6 +281,13 @@ static void gcm_init_4bit(u128 Htable[16], u64 H[2]) } } #else + Htable[8] = V; + REDUCE1BIT(V); + Htable[4] = V; + REDUCE1BIT(V); + Htable[2] = V; + REDUCE1BIT(V); + Htable[1] = V; Htable[3].hi = V.hi^Htable[2].hi, Htable[3].lo = V.lo^Htable[2].lo; V=Htable[4]; Htable[5].hi = V.hi^Htable[1].hi, Htable[5].lo = V.lo^Htable[1].lo; @@ -324,16 +302,39 @@ static void gcm_init_4bit(u128 Htable[16], u64 H[2]) Htable[14].hi = V.hi^Htable[6].hi, Htable[14].lo = V.lo^Htable[6].lo; Htable[15].hi = V.hi^Htable[7].hi, Htable[15].lo = V.lo^Htable[7].lo; #endif +#if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm)) + /* + * ARM assembler expects specific dword order in Htable. 
+ */ + { + int j; + const union { long one; char little; } is_endian = {1}; + + if (is_endian.little) + for (j=0;j<16;++j) { + V = Htable[j]; + Htable[j].hi = V.lo; + Htable[j].lo = V.hi; + } + else + for (j=0;j<16;++j) { + V = Htable[j]; + Htable[j].hi = V.lo<<32|V.lo>>32; + Htable[j].lo = V.hi<<32|V.hi>>32; + } + } +#endif } -#ifndef GMULT_ASM +#ifndef GHASH_ASM +__fips_constseg static const size_t rem_4bit[16] = { PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460), PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0), PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560), PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0) }; -static void gcm_gmult_4bit(u64 Xi[2], u128 Htable[16]) +static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]) { u128 Z; int cnt = 15; @@ -399,17 +400,20 @@ static void gcm_gmult_4bit(u64 Xi[2], u128 Htable[16]) #if !defined(OPENSSL_SMALL_FOOTPRINT) /* * Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for - * details... It doesn't give any performance improvement, at least - * not on x86[_64]. It's here mostly as a placeholder for possible - * future non-trivial optimization[s]... + * details... Compiler-generated code doesn't seem to give any + * performance improvement, at least not on x86[_64]. It's here + * mostly as reference and a placeholder for possible future + * non-trivial optimization[s]... */ -static void gcm_ghash_4bit(const u8 *inp,size_t len,u64 Xi[2], u128 Htable[16]) +static void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16], + const u8 *inp,size_t len) { u128 Z; int cnt; size_t rem, nlo, nhi; const union { long one; char little; } is_endian = {1}; +#if 1 do { cnt = 15; nlo = ((const u8 *)Xi)[15]; @@ -450,6 +454,100 @@ static void gcm_ghash_4bit(const u8 *inp,size_t len,u64 Xi[2], u128 Htable[16]) Z.hi ^= Htable[nlo].hi; Z.lo ^= Htable[nlo].lo; } +#else + /* + * Extra 256+16 bytes per-key plus 512 bytes shared tables + * [should] give ~50% improvement... One could have PACK()-ed + * the rem_8bit even here, but the priority is to minimize + * cache footprint... 
+ */ + u128 Hshr4[16]; /* Htable shifted right by 4 bits */ + u8 Hshl4[16]; /* Htable shifted left by 4 bits */ + __fips_constseg + static const unsigned short rem_8bit[256] = { + 0x0000, 0x01C2, 0x0384, 0x0246, 0x0708, 0x06CA, 0x048C, 0x054E, + 0x0E10, 0x0FD2, 0x0D94, 0x0C56, 0x0918, 0x08DA, 0x0A9C, 0x0B5E, + 0x1C20, 0x1DE2, 0x1FA4, 0x1E66, 0x1B28, 0x1AEA, 0x18AC, 0x196E, + 0x1230, 0x13F2, 0x11B4, 0x1076, 0x1538, 0x14FA, 0x16BC, 0x177E, + 0x3840, 0x3982, 0x3BC4, 0x3A06, 0x3F48, 0x3E8A, 0x3CCC, 0x3D0E, + 0x3650, 0x3792, 0x35D4, 0x3416, 0x3158, 0x309A, 0x32DC, 0x331E, + 0x2460, 0x25A2, 0x27E4, 0x2626, 0x2368, 0x22AA, 0x20EC, 0x212E, + 0x2A70, 0x2BB2, 0x29F4, 0x2836, 0x2D78, 0x2CBA, 0x2EFC, 0x2F3E, + 0x7080, 0x7142, 0x7304, 0x72C6, 0x7788, 0x764A, 0x740C, 0x75CE, + 0x7E90, 0x7F52, 0x7D14, 0x7CD6, 0x7998, 0x785A, 0x7A1C, 0x7BDE, + 0x6CA0, 0x6D62, 0x6F24, 0x6EE6, 0x6BA8, 0x6A6A, 0x682C, 0x69EE, + 0x62B0, 0x6372, 0x6134, 0x60F6, 0x65B8, 0x647A, 0x663C, 0x67FE, + 0x48C0, 0x4902, 0x4B44, 0x4A86, 0x4FC8, 0x4E0A, 0x4C4C, 0x4D8E, + 0x46D0, 0x4712, 0x4554, 0x4496, 0x41D8, 0x401A, 0x425C, 0x439E, + 0x54E0, 0x5522, 0x5764, 0x56A6, 0x53E8, 0x522A, 0x506C, 0x51AE, + 0x5AF0, 0x5B32, 0x5974, 0x58B6, 0x5DF8, 0x5C3A, 0x5E7C, 0x5FBE, + 0xE100, 0xE0C2, 0xE284, 0xE346, 0xE608, 0xE7CA, 0xE58C, 0xE44E, + 0xEF10, 0xEED2, 0xEC94, 0xED56, 0xE818, 0xE9DA, 0xEB9C, 0xEA5E, + 0xFD20, 0xFCE2, 0xFEA4, 0xFF66, 0xFA28, 0xFBEA, 0xF9AC, 0xF86E, + 0xF330, 0xF2F2, 0xF0B4, 0xF176, 0xF438, 0xF5FA, 0xF7BC, 0xF67E, + 0xD940, 0xD882, 0xDAC4, 0xDB06, 0xDE48, 0xDF8A, 0xDDCC, 0xDC0E, + 0xD750, 0xD692, 0xD4D4, 0xD516, 0xD058, 0xD19A, 0xD3DC, 0xD21E, + 0xC560, 0xC4A2, 0xC6E4, 0xC726, 0xC268, 0xC3AA, 0xC1EC, 0xC02E, + 0xCB70, 0xCAB2, 0xC8F4, 0xC936, 0xCC78, 0xCDBA, 0xCFFC, 0xCE3E, + 0x9180, 0x9042, 0x9204, 0x93C6, 0x9688, 0x974A, 0x950C, 0x94CE, + 0x9F90, 0x9E52, 0x9C14, 0x9DD6, 0x9898, 0x995A, 0x9B1C, 0x9ADE, + 0x8DA0, 0x8C62, 0x8E24, 0x8FE6, 0x8AA8, 0x8B6A, 0x892C, 0x88EE, + 0x83B0, 0x8272, 0x8034, 0x81F6, 0x84B8, 0x857A, 0x873C, 0x86FE, + 0xA9C0, 0xA802, 0xAA44, 0xAB86, 0xAEC8, 0xAF0A, 0xAD4C, 0xAC8E, + 0xA7D0, 0xA612, 0xA454, 0xA596, 0xA0D8, 0xA11A, 0xA35C, 0xA29E, + 0xB5E0, 0xB422, 0xB664, 0xB7A6, 0xB2E8, 0xB32A, 0xB16C, 0xB0AE, + 0xBBF0, 0xBA32, 0xB874, 0xB9B6, 0xBCF8, 0xBD3A, 0xBF7C, 0xBEBE }; + /* + * This pre-processing phase slows down procedure by approximately + * same time as it makes each loop spin faster. In other words + * single block performance is approximately same as straightforward + * "4-bit" implementation, and then it goes only faster... 
+ */ + for (cnt=0; cnt<16; ++cnt) { + Z.hi = Htable[cnt].hi; + Z.lo = Htable[cnt].lo; + Hshr4[cnt].lo = (Z.hi<<60)|(Z.lo>>4); + Hshr4[cnt].hi = (Z.hi>>4); + Hshl4[cnt] = (u8)(Z.lo<<4); + } + + do { + for (Z.lo=0, Z.hi=0, cnt=15; cnt; --cnt) { + nlo = ((const u8 *)Xi)[cnt]; + nlo ^= inp[cnt]; + nhi = nlo>>4; + nlo &= 0xf; + + Z.hi ^= Htable[nlo].hi; + Z.lo ^= Htable[nlo].lo; + + rem = (size_t)Z.lo&0xff; + + Z.lo = (Z.hi<<56)|(Z.lo>>8); + Z.hi = (Z.hi>>8); + + Z.hi ^= Hshr4[nhi].hi; + Z.lo ^= Hshr4[nhi].lo; + Z.hi ^= (u64)rem_8bit[rem^Hshl4[nhi]]<<48; + } + + nlo = ((const u8 *)Xi)[0]; + nlo ^= inp[0]; + nhi = nlo>>4; + nlo &= 0xf; + + Z.hi ^= Htable[nlo].hi; + Z.lo ^= Htable[nlo].lo; + + rem = (size_t)Z.lo&0xf; + + Z.lo = (Z.hi<<60)|(Z.lo>>4); + Z.hi = (Z.hi>>4); + + Z.hi ^= Htable[nhi].hi; + Z.lo ^= Htable[nhi].lo; + Z.hi ^= ((u64)rem_8bit[rem<<4])<<48; +#endif if (is_endian.little) { #ifdef BSWAP8 @@ -472,15 +570,20 @@ static void gcm_ghash_4bit(const u8 *inp,size_t len,u64 Xi[2], u128 Htable[16]) } #endif #else -void gcm_gmult_4bit(u64 Xi[2],u128 Htable[16]); -void gcm_ghash_4bit(const u8 *inp,size_t len,u64 Xi[2],u128 Htable[16]); +void gcm_gmult_4bit(u64 Xi[2],const u128 Htable[16]); +void gcm_ghash_4bit(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); #endif #define GCM_MUL(ctx,Xi) gcm_gmult_4bit(ctx->Xi.u,ctx->Htable) -#define GHASH(in,len,ctx) gcm_ghash_4bit(in,len,ctx->Xi.u,ctx->Htable) -#define GHASH_CHUNK 1024 +#if defined(GHASH_ASM) || !defined(OPENSSL_SMALL_FOOTPRINT) +#define GHASH(ctx,in,len) gcm_ghash_4bit((ctx)->Xi.u,(ctx)->Htable,in,len) +/* GHASH_CHUNK is "stride parameter" missioned to mitigate cache + * trashing effect. In other words idea is to hash data while it's + * still in L1 cache after encryption pass... */ +#define GHASH_CHUNK (3*1024) +#endif -#else /* !_4BIT */ +#else /* TABLE_BITS */ static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2]) { @@ -516,17 +619,7 @@ static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2]) Z.hi ^= V.hi&M; Z.lo ^= V.lo&M; - if (sizeof(size_t)==8) { - u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); - V.lo = (V.hi<<63)|(V.lo>>1); - V.hi = (V.hi>>1 )^T; - } - else { - u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); - V.lo = (V.hi<<63)|(V.lo>>1); - V.hi = (V.hi>>1 )^((u64)T<<32); - } - + REDUCE1BIT(V); } } @@ -549,19 +642,49 @@ static void gcm_gmult_1bit(u64 Xi[2],const u64 H[2]) } } #define GCM_MUL(ctx,Xi) gcm_gmult_1bit(ctx->Xi.u,ctx->H.u) + +#endif + +#if TABLE_BITS==4 && defined(GHASH_ASM) +# if !defined(I386_ONLY) && \ + (defined(__i386) || defined(__i386__) || \ + defined(__x86_64) || defined(__x86_64__) || \ + defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64)) +# define GHASH_ASM_X86_OR_64 +# define GCM_FUNCREF_4BIT +extern unsigned int OPENSSL_ia32cap_P[2]; + +void gcm_init_clmul(u128 Htable[16],const u64 Xi[2]); +void gcm_gmult_clmul(u64 Xi[2],const u128 Htable[16]); +void gcm_ghash_clmul(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); + +# if defined(__i386) || defined(__i386__) || defined(_M_IX86) +# define GHASH_ASM_X86 +void gcm_gmult_4bit_mmx(u64 Xi[2],const u128 Htable[16]); +void gcm_ghash_4bit_mmx(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); + +void gcm_gmult_4bit_x86(u64 Xi[2],const u128 Htable[16]); +void gcm_ghash_4bit_x86(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); +# endif +# elif defined(__arm__) || defined(__arm) +# include "arm_arch.h" +# if __ARM_ARCH__>=7 +# define GHASH_ASM_ARM +# define GCM_FUNCREF_4BIT +void gcm_gmult_neon(u64 Xi[2],const u128 Htable[16]); +void 
gcm_ghash_neon(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len); +# endif +# endif #endif -typedef struct { - /* Following 6 names follow names in GCM specification */ - union { u64 u[2]; u32 d[4]; u8 c[16]; } Yi,EKi,EK0, - Xi,H, - len; - /* Pre-computed table used by gcm_gmult_4bit */ - u128 Htable[16]; - unsigned int res, ctr; - block128_f block; - void *key; -} GCM128_CONTEXT; +#ifdef GCM_FUNCREF_4BIT +# undef GCM_MUL +# define GCM_MUL(ctx,Xi) (*gcm_gmult_p)(ctx->Xi.u,ctx->Htable) +# ifdef GHASH +# undef GHASH +# define GHASH(ctx,in,len) (*gcm_ghash_p)(ctx->Xi.u,ctx->Htable,in,len) +# endif +#endif void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block) { @@ -588,25 +711,72 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block) #endif } +#if TABLE_BITS==8 + gcm_init_8bit(ctx->Htable,ctx->H.u); +#elif TABLE_BITS==4 +# if defined(GHASH_ASM_X86_OR_64) +# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2) + if (OPENSSL_ia32cap_P[0]&(1<<24) && /* check FXSR bit */ + OPENSSL_ia32cap_P[1]&(1<<1) ) { /* check PCLMULQDQ bit */ + gcm_init_clmul(ctx->Htable,ctx->H.u); + ctx->gmult = gcm_gmult_clmul; + ctx->ghash = gcm_ghash_clmul; + return; + } +# endif + gcm_init_4bit(ctx->Htable,ctx->H.u); +# if defined(GHASH_ASM_X86) /* x86 only */ +# if defined(OPENSSL_IA32_SSE2) + if (OPENSSL_ia32cap_P[0]&(1<<25)) { /* check SSE bit */ +# else + if (OPENSSL_ia32cap_P[0]&(1<<23)) { /* check MMX bit */ +# endif + ctx->gmult = gcm_gmult_4bit_mmx; + ctx->ghash = gcm_ghash_4bit_mmx; + } else { + ctx->gmult = gcm_gmult_4bit_x86; + ctx->ghash = gcm_ghash_4bit_x86; + } +# else + ctx->gmult = gcm_gmult_4bit; + ctx->ghash = gcm_ghash_4bit; +# endif +# elif defined(GHASH_ASM_ARM) + if (OPENSSL_armcap_P & ARMV7_NEON) { + ctx->gmult = gcm_gmult_neon; + ctx->ghash = gcm_ghash_neon; + } else { + gcm_init_4bit(ctx->Htable,ctx->H.u); + ctx->gmult = gcm_gmult_4bit; + ctx->ghash = gcm_ghash_4bit; + } +# else gcm_init_4bit(ctx->Htable,ctx->H.u); +# endif +#endif } void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx,const unsigned char *iv,size_t len) { const union { long one; char little; } is_endian = {1}; + unsigned int ctr; +#ifdef GCM_FUNCREF_4BIT + void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; +#endif ctx->Yi.u[0] = 0; ctx->Yi.u[1] = 0; ctx->Xi.u[0] = 0; ctx->Xi.u[1] = 0; - ctx->len.u[0] = 0; - ctx->len.u[1] = 0; - ctx->res = 0; + ctx->len.u[0] = 0; /* AAD length */ + ctx->len.u[1] = 0; /* message length */ + ctx->ares = 0; + ctx->mres = 0; if (len==12) { memcpy(ctx->Yi.c,iv,12); ctx->Yi.c[15]=1; - ctx->ctr=1; + ctr=1; } else { size_t i; @@ -643,28 +813,56 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx,const unsigned char *iv,size_t len) GCM_MUL(ctx,Yi); if (is_endian.little) - ctx->ctr = GETU32(ctx->Yi.c+12); + ctr = GETU32(ctx->Yi.c+12); else - ctx->ctr = ctx->Yi.d[3]; + ctr = ctx->Yi.d[3]; } (*ctx->block)(ctx->Yi.c,ctx->EK0.c,ctx->key); - ++ctx->ctr; + ++ctr; if (is_endian.little) - PUTU32(ctx->Yi.c+12,ctx->ctr); + PUTU32(ctx->Yi.c+12,ctr); else - ctx->Yi.d[3] = ctx->ctr; + ctx->Yi.d[3] = ctr; } -void CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len) +int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len) { size_t i; + unsigned int n; + u64 alen = ctx->len.u[0]; +#ifdef GCM_FUNCREF_4BIT + void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; +# ifdef GHASH + void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16], + const u8 *inp,size_t len) = ctx->ghash; +# endif +#endif + + if (ctx->len.u[1]) 
return -2;

-	ctx->len.u[0] += len;
+	alen += len;
+	if (alen>(U64(1)<<61) || (sizeof(len)==8 && alen<len))
+		return -1;
+	ctx->len.u[0] = alen;
+
+	n = ctx->ares;
+	if (n) {
+		while (n && len) {
+			ctx->Xi.c[n] ^= *(aad++);
+			--len;
+			n = (n+1)%16;
+		}
+		if (n==0) GCM_MUL(ctx,Xi);
+		else {
+			ctx->ares = n;
+			return 0;
+		}
+	}

 #ifdef GHASH
 	if ((i = (len&(size_t)-16))) {
-		GHASH(aad,i,ctx);
+		GHASH(ctx,aad,i);
 		aad += i;
 		len -= i;
 	}
@@ -676,25 +874,53 @@ void CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx,const unsigned char *aad,size_t len)
 		len -= 16;
 	}
 #endif
 	if (len) {
+		n = (unsigned int)len;
 		for (i=0; i<len; ++i) ctx->Xi.c[i] ^= aad[i];
-		GCM_MUL(ctx,Xi);
 	}
+
+	ctx->ares = n;
+	return 0;
 }

-void CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
+int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 		const unsigned char *in, unsigned char *out,
 		size_t len)
 {
 	const union { long one; char little; } is_endian = {1};
 	unsigned int n, ctr;
 	size_t i;
+	u64   mlen  = ctx->len.u[1];
+	block128_f block = ctx->block;
+	void *key = ctx->key;
+#ifdef GCM_FUNCREF_4BIT
+	void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16])	= ctx->gmult;
+# ifdef GHASH
+	void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
+				const u8 *inp,size_t len)	= ctx->ghash;
+# endif
+#endif
+
+#if 0
+	n = (unsigned int)mlen%16; /* alternative to ctx->mres */
+#endif
+	mlen += len;
+	if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len))
+		return -1;
+	ctx->len.u[1] = mlen;
+
+	if (ctx->ares) {
+		/* First call to encrypt finalizes GHASH(AAD) */
+		GCM_MUL(ctx,Xi);
+		ctx->ares = 0;
+	}

-	ctx->len.u[1] += len;
-	n   = ctx->res;
-	ctr = ctx->ctr;
+	if (is_endian.little)
+		ctr = GETU32(ctx->Yi.c+12);
+	else
+		ctr = ctx->Yi.d[3];
+	n = ctx->mres;
 #if !defined(OPENSSL_SMALL_FOOTPRINT)
 	if (16%sizeof(size_t) == 0) do {	/* always true actually */
 		if (n) {
@@ -705,20 +931,20 @@ void CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 			}
 			if (n==0) GCM_MUL(ctx,Xi);
 			else {
-				ctx->res = n;
-				return;
+				ctx->mres = n;
+				return 0;
 			}
 		}
 #if defined(STRICT_ALIGNMENT)
 		if (((size_t)in|(size_t)out)%sizeof(size_t) != 0)
 			break;
 #endif
-#ifdef GHASH
+#if defined(GHASH) && defined(GHASH_CHUNK)
 		while (len>=GHASH_CHUNK) {
 		    size_t j=GHASH_CHUNK;

 		    while (j) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -731,14 +957,14 @@ void CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 			out += 16;
 			in  += 16;
 			j   -= 16;
 		    }
-		    GHASH(out-GHASH_CHUNK,GHASH_CHUNK,ctx);
+		    GHASH(ctx,out-GHASH_CHUNK,GHASH_CHUNK);
 		    len -= GHASH_CHUNK;
 		}
 		if ((i = (len&(size_t)-16))) {
 		    size_t j=i;

 		    while (len>=16) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -751,11 +977,11 @@ void CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 			out += 16;
 			in  += 16;
 			len -= 16;
 		    }
-		    GHASH(out-j,j,ctx);
+		    GHASH(ctx,out-j,j);
 		}
 #else
 		while (len>=16) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -772,7 +998,7 @@ void CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 		}
 #endif
 		if (len) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -784,14 +1010,13 @@ void CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 			}
 		}

-		ctx->res = n;
-		ctx->ctr = ctr;
-		return;
+		ctx->mres = n;
+		return 0;
 	} while(0);
 #endif
 	for (i=0;i<len;++i) {
 		if (n==0) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -804,22 +1029,45 @@ void CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
 			GCM_MUL(ctx,Xi);
 	}

-	ctx->res = n;
-	ctx->ctr = ctr;
+	ctx->mres = n;
+	return 0;
 }

-void CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
+int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 		const unsigned char *in, unsigned char *out,
 		size_t len)
 {
 	const union { long one; char little; } is_endian = {1};
 	unsigned int n, ctr;
 	size_t i;
+	u64   mlen  = ctx->len.u[1];
+	block128_f block = ctx->block;
+	void *key = ctx->key;
+#ifdef GCM_FUNCREF_4BIT
+	void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16])	= ctx->gmult;
+# ifdef GHASH
+	void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
+				const u8 *inp,size_t len)	= ctx->ghash;
+# endif
+#endif
+
+	mlen += len;
+	if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len))
+		return -1;
+	ctx->len.u[1] = mlen;

-	ctx->len.u[1] += len;
-	n   = ctx->res;
-	ctr = ctx->ctr;
+	if (ctx->ares) {
+		/* First call to decrypt finalizes GHASH(AAD) */
+		GCM_MUL(ctx,Xi);
+		ctx->ares = 0;
+	}
+
+	if (is_endian.little)
+		ctr = GETU32(ctx->Yi.c+12);
+	else
+		ctr = ctx->Yi.d[3];
+	n = ctx->mres;
 #if !defined(OPENSSL_SMALL_FOOTPRINT)
 	if (16%sizeof(size_t) == 0) do {	/* always true actually */
 		if (n) {
@@ -832,21 +1080,21 @@ void CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 			}
 			if (n==0) GCM_MUL (ctx,Xi);
 			else {
-				ctx->res = n;
-				return;
+				ctx->mres = n;
+				return 0;
 			}
 		}
 #if defined(STRICT_ALIGNMENT)
 		if (((size_t)in|(size_t)out)%sizeof(size_t) != 0)
 			break;
 #endif
-#ifdef GHASH
+#if defined(GHASH) && defined(GHASH_CHUNK)
 		while (len>=GHASH_CHUNK) {
 		    size_t j=GHASH_CHUNK;

-		    GHASH(in,GHASH_CHUNK,ctx);
+		    GHASH(ctx,in,GHASH_CHUNK);
 		    while (j) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -862,9 +1110,9 @@ void CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 			len -= GHASH_CHUNK;
 		}
 		if ((i = (len&(size_t)-16))) {
-		    GHASH(in,i,ctx);
+		    GHASH(ctx,in,i);
 		    while (len>=16) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -880,7 +1128,7 @@ void CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 		}
 #else
 		while (len>=16) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -898,7 +1146,7 @@ void CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 		}
 #endif
 		if (len) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -912,15 +1160,14 @@ void CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 			}
 		}

-		ctx->res = n;
-		ctx->ctr = ctr;
-		return;
+		ctx->mres = n;
+		return 0;
 	} while(0);
 #endif
 	for (i=0;i<len;++i) {
 		u8 c;
 		if (n==0) {
-			(*ctx->block)(ctx->Yi.c,ctx->EKi.c,ctx->key);
+			(*block)(ctx->Yi.c,ctx->EKi.c,key);
 			++ctr;
 			if (is_endian.little)
 				PUTU32(ctx->Yi.c+12,ctr);
@@ -928,24 +1175,233 @@ void CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 			ctx->Yi.d[3] = ctr;
 		}
 		c = in[i];
-		out[i] ^= ctx->EKi.c[n];
+		out[i] = c^ctx->EKi.c[n];
 		ctx->Xi.c[n] ^= c;
 		n = (n+1)%16;
 		if (n==0)
 			GCM_MUL(ctx,Xi);
 	}

-	ctx->res = n;
-	ctx->ctr = ctr;
+	ctx->mres = n;
+	return 0;
+}
+
+int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
+		const unsigned char *in, unsigned char *out,
+		size_t len, ctr128_f stream)
+{
+	const union { long one; char little; } is_endian = {1};
+	unsigned int n, ctr;
+	size_t i;
+	u64   mlen = ctx->len.u[1];
+	void *key  = ctx->key;
+#ifdef GCM_FUNCREF_4BIT
+	void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16])	= ctx->gmult;
+# ifdef GHASH
+	void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
+				const u8 *inp,size_t len)	= ctx->ghash;
+# endif
+#endif
+
+	mlen += len;
+	if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len))
+		return -1;
+	ctx->len.u[1] = mlen;
+
+	if (ctx->ares) {
+		/* First call to encrypt finalizes GHASH(AAD) */
+		GCM_MUL(ctx,Xi);
+		ctx->ares = 0;
+	}
+
+	if (is_endian.little)
+		ctr = GETU32(ctx->Yi.c+12);
+	else
+		ctr = ctx->Yi.d[3];
+
+	n = ctx->mres;
+	if (n) {
+		while (n && len) {
+			ctx->Xi.c[n] ^= *(out++) = *(in++)^ctx->EKi.c[n];
+			--len;
+			n = (n+1)%16;
+		}
+		if (n==0) GCM_MUL(ctx,Xi);
+		else {
+			ctx->mres = n;
+			return 0;
+		}
+	}
+#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
+	while (len>=GHASH_CHUNK) {
+		(*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c);
+		ctr += GHASH_CHUNK/16;
+		if (is_endian.little)
+			PUTU32(ctx->Yi.c+12,ctr);
+		else
+			ctx->Yi.d[3] = ctr;
+		GHASH(ctx,out,GHASH_CHUNK);
+		out += GHASH_CHUNK;
+		in  += GHASH_CHUNK;
+		len -= GHASH_CHUNK;
+	}
+#endif
+	if ((i = (len&(size_t)-16))) {
+		size_t j=i/16;
+
+		(*stream)(in,out,j,key,ctx->Yi.c);
+		ctr += (unsigned int)j;
+		if (is_endian.little)
+			PUTU32(ctx->Yi.c+12,ctr);
+		else
+			ctx->Yi.d[3] = ctr;
+		in  += i;
+		len -= i;
+#if defined(GHASH)
+		GHASH(ctx,out,i);
+		out += i;
+#else
+		while (j--) {
+			for (i=0;i<16;++i) ctx->Xi.c[i] ^= out[i];
+			GCM_MUL(ctx,Xi);
+			out += 16;
+		}
+#endif
+	}
+	if (len) {
+		(*ctx->block)(ctx->Yi.c,ctx->EKi.c,key);
+		++ctr;
+		if (is_endian.little)
+			PUTU32(ctx->Yi.c+12,ctr);
+		else
+			ctx->Yi.d[3] = ctr;
+		while (len--) {
+			ctx->Xi.c[n] ^= out[n] = in[n]^ctx->EKi.c[n];
+			++n;
+		}
+	}
+
+	ctx->mres = n;
+	return 0;
 }

-void CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx)
+int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
+		const unsigned char *in, unsigned char *out,
+		size_t len,ctr128_f stream)
+{
+	const union { long one; char little; } is_endian = {1};
+	unsigned int n, ctr;
+	size_t i;
+	u64   mlen = ctx->len.u[1];
+	void *key  = ctx->key;
+#ifdef GCM_FUNCREF_4BIT
+	void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16])	= ctx->gmult;
+# ifdef GHASH
+	void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
+				const u8 *inp,size_t len)	= ctx->ghash;
+# endif
+#endif
+
+	mlen += len;
+	if (mlen>((U64(1)<<36)-32) || (sizeof(len)==8 && mlen<len))
+		return -1;
+	ctx->len.u[1] = mlen;
+
+	if (ctx->ares) {
+		/* First call to decrypt finalizes GHASH(AAD) */
+		GCM_MUL(ctx,Xi);
+		ctx->ares = 0;
+	}
+
+	if (is_endian.little)
+		ctr = GETU32(ctx->Yi.c+12);
+	else
+		ctr = ctx->Yi.d[3];
+
+	n = ctx->mres;
+	if (n) {
+		while (n && len) {
+			u8 c = *(in++);
+			*(out++) = c^ctx->EKi.c[n];
+			ctx->Xi.c[n] ^= c;
+			--len;
+			n = (n+1)%16;
+		}
+		if (n==0) GCM_MUL (ctx,Xi);
+		else {
+			ctx->mres = n;
+			return 0;
+		}
+	}
+#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
+	while (len>=GHASH_CHUNK) {
+		GHASH(ctx,in,GHASH_CHUNK);
+		(*stream)(in,out,GHASH_CHUNK/16,key,ctx->Yi.c);
+		ctr += GHASH_CHUNK/16;
+		if (is_endian.little)
+			PUTU32(ctx->Yi.c+12,ctr);
+		else
+			ctx->Yi.d[3] = ctr;
+		out += GHASH_CHUNK;
+		in  += GHASH_CHUNK;
+		len -= GHASH_CHUNK;
+	}
+#endif
+	if ((i = (len&(size_t)-16))) {
+		size_t j=i/16;
+
+#if defined(GHASH)
+		GHASH(ctx,in,i);
+#else
+		while (j--) {
+			size_t k;
+			for (k=0;k<16;++k) ctx->Xi.c[k] ^= in[k];
+			GCM_MUL(ctx,Xi);
+			in += 16;
+		}
+		j = i/16;
+		in -= i;
+#endif
+		(*stream)(in,out,j,key,ctx->Yi.c);
+		ctr += (unsigned int)j;
+		if (is_endian.little)
+			PUTU32(ctx->Yi.c+12,ctr);
+		else
+			ctx->Yi.d[3] = ctr;
+		out += i;
+		in  += i;
+		len -= i;
+	}
+	if (len) {
+		(*ctx->block)(ctx->Yi.c,ctx->EKi.c,key);
+		++ctr;
+		if (is_endian.little)
+			PUTU32(ctx->Yi.c+12,ctr);
+		else
+			ctx->Yi.d[3] = ctr;
+		while (len--) {
+			u8 c = in[n];
+ ctx->Xi.c[n] ^= c; + out[n] = c^ctx->EKi.c[n]; + ++n; + } + } + + ctx->mres = n; + return 0; +} + +int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx,const unsigned char *tag, + size_t len) { const union { long one; char little; } is_endian = {1}; u64 alen = ctx->len.u[0]<<3; u64 clen = ctx->len.u[1]<<3; +#ifdef GCM_FUNCREF_4BIT + void (*gcm_gmult_p)(u64 Xi[2],const u128 Htable[16]) = ctx->gmult; +#endif - if (ctx->res) + if (ctx->mres) GCM_MUL(ctx,Xi); if (is_endian.little) { @@ -969,6 +1425,35 @@ void CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx) ctx->Xi.u[0] ^= ctx->EK0.u[0]; ctx->Xi.u[1] ^= ctx->EK0.u[1]; + + if (tag && len<=sizeof(ctx->Xi)) + return memcmp(ctx->Xi.c,tag,len); + else + return -1; +} + +void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len) +{ + CRYPTO_gcm128_finish(ctx, NULL, 0); + memcpy(tag, ctx->Xi.c, len<=sizeof(ctx->Xi.c)?len:sizeof(ctx->Xi.c)); +} + +GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block) +{ + GCM128_CONTEXT *ret; + + if ((ret = (GCM128_CONTEXT *)OPENSSL_malloc(sizeof(GCM128_CONTEXT)))) + CRYPTO_gcm128_init(ret,key,block); + + return ret; +} + +void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx) +{ + if (ctx) { + OPENSSL_cleanse(ctx,sizeof(*ctx)); + OPENSSL_free(ctx); + } } #if defined(SELFTEST) @@ -982,6 +1467,7 @@ static const u8 K1[16], IV1[12], *C1=NULL, T1[]= {0x58,0xe2,0xfc,0xce,0xfa,0x7e,0x30,0x61,0x36,0x7f,0x1d,0x57,0xa4,0xe7,0x45,0x5a}; + /* Test Case 2 */ #define K2 K1 #define A2 A1 @@ -1002,7 +1488,7 @@ static const u8 K3[]= {0xfe,0xff,0xe9,0x92,0x86,0x65,0x73,0x1c,0x6d,0x6a,0x8f,0 0xe3,0xaa,0x21,0x2f,0x2c,0x02,0xa4,0xe0,0x35,0xc1,0x7e,0x23,0x29,0xac,0xa1,0x2e, 0x21,0xd5,0x14,0xb2,0x54,0x66,0x93,0x1c,0x7d,0x8f,0x6a,0x5a,0xac,0x84,0xaa,0x05, 0x1b,0xa3,0x0b,0x39,0x6a,0x0a,0xac,0x97,0x3d,0x58,0xe0,0x91,0x47,0x3f,0x59,0x85}, - T3[]= {0x4d,0x5c,0x2a,0xf3,0x27,0xcd,0x64,0xa6,0x2c,0xf3,0x5a,0xbd,0x2b,0xa6,0xfa,0xb4,}; + T3[]= {0x4d,0x5c,0x2a,0xf3,0x27,0xcd,0x64,0xa6,0x2c,0xf3,0x5a,0xbd,0x2b,0xa6,0xfa,0xb4}; /* Test Case 4 */ #define K4 K3 @@ -1022,14 +1508,14 @@ static const u8 P4[]= {0xd9,0x31,0x32,0x25,0xf8,0x84,0x06,0xe5,0xa5,0x59,0x09,0 /* Test Case 5 */ #define K5 K4 #define P5 P4 -static const u8 A5[]= {0xfe,0xed,0xfa,0xce,0xde,0xad,0xbe,0xef,0xfe,0xed,0xfa,0xce,0xde,0xad,0xbe,0xef, - 0xab,0xad,0xda,0xd2}, - IV5[]= {0xca,0xfe,0xba,0xbe,0xfa,0xce,0xdb,0xad}, +#define A5 A4 +static const u8 IV5[]= {0xca,0xfe,0xba,0xbe,0xfa,0xce,0xdb,0xad}, C5[]= {0x61,0x35,0x3b,0x4c,0x28,0x06,0x93,0x4a,0x77,0x7f,0xf5,0x1f,0xa2,0x2a,0x47,0x55, 0x69,0x9b,0x2a,0x71,0x4f,0xcd,0xc6,0xf8,0x37,0x66,0xe5,0xf9,0x7b,0x6c,0x74,0x23, 0x73,0x80,0x69,0x00,0xe4,0x9f,0x24,0xb2,0x2b,0x09,0x75,0x44,0xd4,0x89,0x6b,0x42, 0x49,0x89,0xb5,0xe1,0xeb,0xac,0x0f,0x07,0xc2,0x3f,0x45,0x98}, T5[]= {0x36,0x12,0xd2,0xe7,0x9e,0x3b,0x07,0x85,0x56,0x1b,0xe1,0x4a,0xac,0xa2,0xfc,0xcb}; + /* Test Case 6 */ #define K6 K5 #define P6 P5 @@ -1191,17 +1677,19 @@ static const u8 IV18[]={0x93,0x13,0x22,0x5d,0xf8,0x84,0x06,0xe5,0x55,0x90,0x9c,0 AES_set_encrypt_key(K##n,sizeof(K##n)*8,&key); \ CRYPTO_gcm128_init(&ctx,&key,(block128_f)AES_encrypt); \ CRYPTO_gcm128_setiv(&ctx,IV##n,sizeof(IV##n)); \ + memset(out,0,sizeof(out)); \ if (A##n) CRYPTO_gcm128_aad(&ctx,A##n,sizeof(A##n)); \ if (P##n) CRYPTO_gcm128_encrypt(&ctx,P##n,out,sizeof(out)); \ - CRYPTO_gcm128_finish(&ctx); \ - if (memcmp(ctx.Xi.c,T##n,16) || (C##n && memcmp(out,C##n,sizeof(out)))) \ - ret++, printf ("encrypt test#%d failed.\n",n);\ + if (CRYPTO_gcm128_finish(&ctx,T##n,16) || \ + (C##n && memcmp(out,C##n,sizeof(out)))) \ 
+ ret++, printf ("encrypt test#%d failed.\n",n); \ CRYPTO_gcm128_setiv(&ctx,IV##n,sizeof(IV##n)); \ + memset(out,0,sizeof(out)); \ if (A##n) CRYPTO_gcm128_aad(&ctx,A##n,sizeof(A##n)); \ if (C##n) CRYPTO_gcm128_decrypt(&ctx,C##n,out,sizeof(out)); \ - CRYPTO_gcm128_finish(&ctx); \ - if (memcmp(ctx.Xi.c,T##n,16) || (P##n && memcmp(out,P##n,sizeof(out)))) \ - ret++, printf ("decrypt test#%d failed.\n",n);\ + if (CRYPTO_gcm128_finish(&ctx,T##n,16) || \ + (P##n && memcmp(out,P##n,sizeof(out)))) \ + ret++, printf ("decrypt test#%d failed.\n",n); \ } while(0) int main() @@ -1229,6 +1717,7 @@ int main() TEST_CASE(17); TEST_CASE(18); +#ifdef OPENSSL_CPUID_OBJ { size_t start,stop,gcm_t,ctr_t,OPENSSL_rdtsc(); union { u64 u; u8 c[1024]; } buf; @@ -1244,19 +1733,32 @@ int main() gcm_t = OPENSSL_rdtsc() - start; CRYPTO_ctr128_encrypt(buf.c,buf.c,sizeof(buf), - &key,ctx.Yi.c,ctx.EKi.c,&ctx.res, + &key,ctx.Yi.c,ctx.EKi.c,&ctx.mres, (block128_f)AES_encrypt); start = OPENSSL_rdtsc(); CRYPTO_ctr128_encrypt(buf.c,buf.c,sizeof(buf), - &key,ctx.Yi.c,ctx.EKi.c,&ctx.res, - (block128_f)AES_encrypt); + &key,ctx.Yi.c,ctx.EKi.c,&ctx.mres, + (block128_f)AES_encrypt); ctr_t = OPENSSL_rdtsc() - start; printf("%.2f-%.2f=%.2f\n", gcm_t/(double)sizeof(buf), ctr_t/(double)sizeof(buf), (gcm_t-ctr_t)/(double)sizeof(buf)); +#ifdef GHASH + { + void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16], + const u8 *inp,size_t len) = ctx.ghash; + + GHASH((&ctx),buf.c,sizeof(buf)); + start = OPENSSL_rdtsc(); + for (i=0;i<100;++i) GHASH((&ctx),buf.c,sizeof(buf)); + gcm_t = OPENSSL_rdtsc() - start; + printf("%.2f\n",gcm_t/(double)sizeof(buf)/(double)i); } +#endif + } +#endif return ret; }
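
A note on the arithmetic behind this patch. The REDUCE1BIT macro introduced near the top is one shift-and-reduce step of multiplication in GF(2^128): V = V*x, folding in the GCM polynomial E1||0^120 whenever the shifted-out bit is set. The sketch below is illustrative only — reduce1bit, gf128_mul, and u128_t are hypothetical stand-alone names structured like the TABLE_BITS==1 path (gcm_gmult_1bit); the patched code instead precomputes Htable and consumes 4 bits per step with the rem_4bit reduction table.

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t hi, lo; } u128_t;

/* One shift-and-reduce step, the same operation as the patch's
 * REDUCE1BIT macro: V = V*x in GF(2^128), XOR-ing in the GCM
 * polynomial 0xE1 || 0^120 whenever the shifted-out bit was set. */
static void reduce1bit(u128_t *V)
{
	uint64_t T = UINT64_C(0xe100000000000000) & (0 - (V->lo & 1));
	V->lo = (V->hi << 63) | (V->lo >> 1);
	V->hi = (V->hi >> 1) ^ T;
}

/* Bit-serial Xi = Xi*H.  Bytes follow the GCM convention:
 * bit 0 of the block is the most significant bit of byte 0. */
static void gf128_mul(uint8_t Xi[16], const uint8_t H[16])
{
	u128_t Z = { 0, 0 }, V;
	int i;

	/* load H big-endian into V.hi:V.lo */
	V.hi = V.lo = 0;
	for (i = 0; i < 8; ++i)  V.hi = (V.hi << 8) | H[i];
	for (i = 8; i < 16; ++i) V.lo = (V.lo << 8) | H[i];

	for (i = 0; i < 128; ++i) {
		if (Xi[i >> 3] & (0x80 >> (i & 7))) {	/* bit i of Xi set? */
			Z.hi ^= V.hi;
			Z.lo ^= V.lo;
		}
		reduce1bit(&V);				/* V = V*x */
	}

	/* store Z back big-endian */
	for (i = 7; i >= 0; --i)  { Xi[i] = (uint8_t)Z.hi; Z.hi >>= 8; }
	for (i = 15; i >= 8; --i) { Xi[i] = (uint8_t)Z.lo; Z.lo >>= 8; }
}

/* Smoke test: multiplying by the field identity (bit 0 set, i.e.
 * first byte 0x80) must leave Xi unchanged. */
int main(void)
{
	uint8_t one[16] = { 0x80 };
	uint8_t x[16]   = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t y[16];

	memcpy(y, x, sizeof(y));
	gf128_mul(y, one);
	return memcmp(x, y, sizeof(x)) != 0;
}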
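The patch also changes the public contract: CRYPTO_gcm128_aad/encrypt/decrypt now return int (0 on success, -1 on length overflow, -2 for AAD after message data), and it adds CRYPTO_gcm128_new, CRYPTO_gcm128_tag and CRYPTO_gcm128_release. A hypothetical caller of the updated API, modeled on the selftest's TEST_CASE macro — gcm_seal and its key/IV plumbing are illustrative, not upstream code:

#include <openssl/aes.h>
#include <openssl/modes.h>

/* Encrypt and authenticate: returns 0 on success, -1 on any failure. */
static int gcm_seal(const unsigned char key[16], const unsigned char iv[12],
		const unsigned char *aad, size_t alen,
		const unsigned char *pt, unsigned char *ct, size_t len,
		unsigned char tag[16])
{
	AES_KEY aes;
	GCM128_CONTEXT *gcm;
	int rc = -1;

	AES_set_encrypt_key(key,128,&aes);
	if ((gcm = CRYPTO_gcm128_new(&aes,(block128_f)AES_encrypt)) == NULL)
		return -1;
	CRYPTO_gcm128_setiv(gcm,iv,12);
	if (CRYPTO_gcm128_aad(gcm,aad,alen) == 0 &&
	    CRYPTO_gcm128_encrypt(gcm,pt,ct,len) == 0) {
		CRYPTO_gcm128_tag(gcm,tag,16);	/* finalizes and copies the tag */
		rc = 0;
	}
	CRYPTO_gcm128_release(gcm);		/* cleanses and frees the context */
	return rc;
}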
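The matching verify path: CRYPTO_gcm128_finish() now takes the expected tag and returns non-zero on mismatch (or when the tag length exceeds 16 bytes), replacing the open-coded memcmp against ctx.Xi.c that the old TEST_CASE macro performed. Same illustrative caveats as above:

/* Decrypt and verify: returns 0 only if the 16-byte tag matches. */
static int gcm_open(const unsigned char key[16], const unsigned char iv[12],
		const unsigned char *aad, size_t alen,
		const unsigned char *ct, unsigned char *pt, size_t len,
		const unsigned char tag[16])
{
	AES_KEY aes;
	GCM128_CONTEXT *gcm;
	int rc = -1;

	AES_set_encrypt_key(key,128,&aes);	/* GCM uses the encrypt schedule both ways */
	if ((gcm = CRYPTO_gcm128_new(&aes,(block128_f)AES_encrypt)) == NULL)
		return -1;
	CRYPTO_gcm128_setiv(gcm,iv,12);
	if (CRYPTO_gcm128_aad(gcm,aad,alen) == 0 &&
	    CRYPTO_gcm128_decrypt(gcm,ct,pt,len) == 0 &&
	    CRYPTO_gcm128_finish(gcm,tag,16) == 0)
		rc = 0;
	CRYPTO_gcm128_release(gcm);
	return rc;
}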
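Finally, the new length checks (restored above where the original angle brackets were eaten by the HTML-to-text conversion) correspond to the GCM limits in NIST SP 800-38D: at most 2^39-256 bits of plaintext, i.e. (1<<36)-32 bytes, and at most 2^61 bytes of AAD, i.e. 2^64 bits; the "sizeof(len)==8 && mlen<len" clause additionally catches u64 wrap-around when size_t is 64-bit. A trivial stand-alone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_msg = (UINT64_C(1)<<36) - 32;	/* bytes = 2^39-256 bits */
	uint64_t max_aad =  UINT64_C(1)<<61;		/* bytes = 2^64 bits */

	printf("plaintext limit: %llu bytes = %llu bits\n",
		(unsigned long long)max_msg,
		(unsigned long long)(max_msg*8));
	printf("AAD limit:       %llu bytes\n",
		(unsigned long long)max_aad);
	return 0;
}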