/*
 * Copyright 2010-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/crypto.h>
#include "internal/cryptlib.h"
#include "internal/endian.h"
#include "crypto/modes.h"

#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
typedef size_t size_t_aX __attribute((__aligned__(1)));
#else
typedef size_t size_t_aX;
#endif

#if defined(BSWAP4) && defined(STRICT_ALIGNMENT)
/* redefine, because alignment is ensured */
# undef  GETU32
# define GETU32(p)      BSWAP4(*(const u32 *)(p))
# undef  PUTU32
# define PUTU32(p,v)    *(u32 *)(p) = BSWAP4(v)
#endif

/* RISC-V uses C implementation as a fallback. */
#if defined(__riscv)
# define INCLUDE_C_GMULT_4BIT
# define INCLUDE_C_GHASH_4BIT
#endif

#define PACK(s)         ((size_t)(s)<<(sizeof(size_t)*8-16))
#define REDUCE1BIT(V)   do { \
        if (sizeof(size_t)==8) { \
                u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); \
                V.lo  = (V.hi<<63)|(V.lo>>1); \
                V.hi  = (V.hi>>1 )^T; \
        } else { \
                u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); \
                V.lo  = (V.hi<<63)|(V.lo>>1); \
                V.hi  = (V.hi>>1 )^((u64)T<<32); \
        } \
} while(0)
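
/*
 * A minimal illustration (not compiled) of what REDUCE1BIT computes: in
 * GCM's bit-reflected representation of GF(2^128), multiplying an element
 * by x is a 1-bit right shift of the 128-bit value, with the dropped bit
 * folded back in through the reduction polynomial x^128+x^7+x^2+x+1,
 * whose reflected image is the byte 0xE1 at the very top of the value --
 * hence the constants above.  The (0-(V.lo&1)) mask is all-ones exactly
 * when the dropped bit is set, which keeps the reduction branch-free.
 */
#if 0
static void reduce1bit_demo(u128 *V)
{
    u64 T = U64(0xe100000000000000) & (0 - (V->lo & 1)); /* carry mask */

    V->lo = (V->hi << 63) | (V->lo >> 1); /* 128-bit right shift...      */
    V->hi = (V->hi >> 1) ^ T;             /* ...plus the polynomial fold */
}
#endif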

/*-
 * NOTE: TABLE_BITS and all non-4bit implementations have been removed in 3.1.
 *
 * Even though the permitted values for TABLE_BITS were 8, 4 and 1, it
 * should never have been set to 8: 8 was effectively reserved for testing
 * purposes.  TABLE_BITS>1 selects the lookup-table-driven implementations
 * referred to as "Shoup's" in the GCM specification, so OpenSSL does not
 * cover the whole spectrum of possible table-driven implementations.  Why?
 * In the non-"Shoup's" case the memory access pattern is segmented in such
 * a manner that it is trivial to see that cache-timing information can
 * reveal a fair portion of the intermediate hash value.  Given that the
 * ciphertext is always available to an attacker, it is possible to attempt
 * to deduce the secret parameter H and, if successful, tamper with
 * messages [which is trivial in CTR mode].  In the "Shoup's" case this is
 * not as easy, but there is no reason to believe it is resistant to
 * cache-timing attacks either.  As for the "8-bit" implementation, it
 * consumes 16 (sixteen) times more memory, 4KB per individual key + 1KB
 * shared.  On the plus side it should be about twice as fast as the
 * "4-bit" version, and for gcc-generated x86[_64] code the "8-bit" version
 * was observed to run ~75% faster, closer to 100% for commercial
 * compilers...  Yet the "4-bit" procedure is preferred, because it is
 * believed to provide a better security-performance balance and adequate
 * all-round performance.  "All-round" refers to things like:
 *
 * - shorter setup time effectively improves overall timing for
 *   handling short messages;
 * - larger table allocation can become unbearable because of VM
 *   subsystem penalties (for example on Windows a large enough free
 *   results in VM working-set trimming, meaning that a subsequent
 *   malloc would immediately incur working-set expansion);
 * - a larger table has a larger cache footprint, which can affect the
 *   performance of other code paths (not necessarily even from the
 *   same thread in a Hyper-Threading world);
 *
 * A value of 1 is not appropriate for performance reasons.
 */
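
/*
 * For concreteness, the memory figures quoted above: a "4-bit" table is
 * 16 u128 entries, i.e. 16*16 = 256 bytes per key, plus the small shared
 * rem_4bit table below.  An "8-bit" table would hold 256 entries, i.e.
 * 256*16 = 4KB per key, plus a shared remainder table on the order of
 * 1KB -- the sixteen-fold difference mentioned above.
 */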

static void gcm_init_4bit(u128 Htable[16], const u64 H[2])
{
    u128 V;
# if defined(OPENSSL_SMALL_FOOTPRINT)
    int i;
# endif

    Htable[0].hi = 0;
    Htable[0].lo = 0;
    V.hi = H[0];
    V.lo = H[1];

# if defined(OPENSSL_SMALL_FOOTPRINT)
    for (Htable[8] = V, i = 4; i > 0; i >>= 1) {
        REDUCE1BIT(V);
        Htable[i] = V;
    }

    for (i = 2; i < 16; i <<= 1) {
        u128 *Hi = Htable + i;
        int j;
        for (V = *Hi, j = 1; j < i; ++j) {
            Hi[j].hi = V.hi ^ Htable[j].hi;
            Hi[j].lo = V.lo ^ Htable[j].lo;
        }
    }
# else
    Htable[8] = V;
    REDUCE1BIT(V);
    Htable[4] = V;
    REDUCE1BIT(V);
    Htable[2] = V;
    REDUCE1BIT(V);
    Htable[1] = V;
    Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
    V = Htable[4];
    Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
    Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
    Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
    V = Htable[8];
    Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
    Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
    Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
    Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
    Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
    Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
    Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;
# endif
# if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm))
    /*
     * ARM assembler expects specific dword order in Htable.
     */
    {
        int j;
        DECLARE_IS_ENDIAN;

        if (IS_LITTLE_ENDIAN)
            for (j = 0; j < 16; ++j) {
                V = Htable[j];
                Htable[j].hi = V.lo;
                Htable[j].lo = V.hi;
        } else
            for (j = 0; j < 16; ++j) {
                V = Htable[j];
                Htable[j].hi = V.lo << 32 | V.lo >> 32;
                Htable[j].lo = V.hi << 32 | V.hi >> 32;
            }
    }
# endif
}

# if !defined(GHASH_ASM) || defined(INCLUDE_C_GMULT_4BIT)
static const size_t rem_4bit[16] = {
    PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460),
    PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0),
    PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
    PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0)
};
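
/*
 * How the constants above can be derived (illustrative sketch, not
 * compiled): shifting Z right by 4 bits drops a 4-bit remainder r, and
 * each dropped bit j folds back in as the reduction byte 0xE1 shifted
 * left by 5+j bit positions; PACK() then moves the 16-bit result into
 * the top of a size_t.  E.g. rem_4bit[1] = PACK(0xE1 << 5) = PACK(0x1C20).
 */
#if 0
static size_t rem_4bit_entry(unsigned int r)
{
    unsigned int j, v = 0;

    for (j = 0; j < 4; ++j)
        if (r & (1u << j))
            v ^= 0xE1u << (5 + j);      /* fold bit j through 0xE1 */
    return PACK(v);
}
#endif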

static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
{
    u128 Z;
    int cnt = 15;
    size_t rem, nlo, nhi;
    DECLARE_IS_ENDIAN;

    nlo = ((const u8 *)Xi)[15];
    nhi = nlo >> 4;
    nlo &= 0xf;

    Z.hi = Htable[nlo].hi;
    Z.lo = Htable[nlo].lo;

    while (1) {
        rem = (size_t)Z.lo & 0xf;
        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
        Z.hi = (Z.hi >> 4);
        if (sizeof(size_t) == 8)
            Z.hi ^= rem_4bit[rem];
        else
            Z.hi ^= (u64)rem_4bit[rem] << 32;

        Z.hi ^= Htable[nhi].hi;
        Z.lo ^= Htable[nhi].lo;

        if (--cnt < 0)
            break;

        nlo = ((const u8 *)Xi)[cnt];
        nhi = nlo >> 4;
        nlo &= 0xf;

        rem = (size_t)Z.lo & 0xf;
        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
        Z.hi = (Z.hi >> 4);
        if (sizeof(size_t) == 8)
            Z.hi ^= rem_4bit[rem];
        else
            Z.hi ^= (u64)rem_4bit[rem] << 32;

        Z.hi ^= Htable[nlo].hi;
        Z.lo ^= Htable[nlo].lo;
    }

    if (IS_LITTLE_ENDIAN) {
#  ifdef BSWAP8
        Xi[0] = BSWAP8(Z.hi);
        Xi[1] = BSWAP8(Z.lo);
#  else
        u8 *p = (u8 *)Xi;
        u32 v;
        v = (u32)(Z.hi >> 32);
        PUTU32(p, v);
        v = (u32)(Z.hi);
        PUTU32(p + 4, v);
        v = (u32)(Z.lo >> 32);
        PUTU32(p + 8, v);
        v = (u32)(Z.lo);
        PUTU32(p + 12, v);
#  endif
    } else {
        Xi[0] = Z.hi;
        Xi[1] = Z.lo;
    }
}
# endif

# if !defined(GHASH_ASM) || defined(INCLUDE_C_GHASH_4BIT)
#  if !defined(OPENSSL_SMALL_FOOTPRINT)
/*
 * Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for
 * details... Compiler-generated code doesn't seem to give any
 * performance improvement, at least not on x86[_64]. It's here
 * mostly as reference and a placeholder for possible future
 * non-trivial optimization[s]...
 */
static void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
                           const u8 *inp, size_t len)
{
    u128 Z;
    int cnt;
    size_t rem, nlo, nhi;
    DECLARE_IS_ENDIAN;

    do {
        cnt = 15;
        nlo = ((const u8 *)Xi)[15];
        nlo ^= inp[15];
        nhi = nlo >> 4;
        nlo &= 0xf;

        Z.hi = Htable[nlo].hi;
        Z.lo = Htable[nlo].lo;

        while (1) {
            rem = (size_t)Z.lo & 0xf;
            Z.lo = (Z.hi << 60) | (Z.lo >> 4);
            Z.hi = (Z.hi >> 4);
            if (sizeof(size_t) == 8)
                Z.hi ^= rem_4bit[rem];
            else
                Z.hi ^= (u64)rem_4bit[rem] << 32;

            Z.hi ^= Htable[nhi].hi;
            Z.lo ^= Htable[nhi].lo;

            if (--cnt < 0)
                break;

            nlo = ((const u8 *)Xi)[cnt];
            nlo ^= inp[cnt];
            nhi = nlo >> 4;
            nlo &= 0xf;

            rem = (size_t)Z.lo & 0xf;
            Z.lo = (Z.hi << 60) | (Z.lo >> 4);
            Z.hi = (Z.hi >> 4);
            if (sizeof(size_t) == 8)
                Z.hi ^= rem_4bit[rem];
            else
                Z.hi ^= (u64)rem_4bit[rem] << 32;

            Z.hi ^= Htable[nlo].hi;
            Z.lo ^= Htable[nlo].lo;
        }

        if (IS_LITTLE_ENDIAN) {
#   ifdef BSWAP8
            Xi[0] = BSWAP8(Z.hi);
            Xi[1] = BSWAP8(Z.lo);
#   else
            u8 *p = (u8 *)Xi;
            u32 v;
            v = (u32)(Z.hi >> 32);
            PUTU32(p, v);
            v = (u32)(Z.hi);
            PUTU32(p + 4, v);
            v = (u32)(Z.lo >> 32);
            PUTU32(p + 8, v);
            v = (u32)(Z.lo);
            PUTU32(p + 12, v);
#   endif
        } else {
            Xi[0] = Z.hi;
            Xi[1] = Z.lo;
        }

        inp += 16;
        /* Block size is 128 bits so len is a multiple of 16 */
        len -= 16;
    } while (len > 0);
}
#  endif
# else
void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
# endif

# define GCM_MUL(ctx)      ctx->funcs.gmult(ctx->Xi.u,ctx->Htable)
# if defined(GHASH_ASM) || !defined(OPENSSL_SMALL_FOOTPRINT)
#  define GHASH(ctx,in,len) ctx->funcs.ghash((ctx)->Xi.u,(ctx)->Htable,in,len)
/*
 * GHASH_CHUNK is a "stride parameter" whose mission is to mitigate the
 * cache-trashing effect.  In other words, the idea is to hash data while
 * it is still in the L1 cache after the encryption pass...
 */
#  define GHASH_CHUNK       (3*1024)
# endif
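
/*
 * A minimal sketch (not compiled; see CRYPTO_gcm128_encrypt below for the
 * real loop) of the pattern GHASH_CHUNK enables: counter-encrypt one 3KB
 * chunk, then GHASH that same chunk while it is presumably still hot in
 * the L1 cache, instead of making one full encryption pass and then one
 * full hashing pass over the entire buffer.
 */
#if 0
    while (len >= GHASH_CHUNK) {
        /* counter-encrypt GHASH_CHUNK bytes from in to out ... */
        GHASH(ctx, out, GHASH_CHUNK); /* ... then hash them while cached */
        in  += GHASH_CHUNK;
        out += GHASH_CHUNK;
        len -= GHASH_CHUNK;
    }
#endif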

#if (defined(GHASH_ASM) || defined(OPENSSL_CPUID_OBJ))
# if !defined(I386_ONLY) && \
     (defined(__i386) || defined(__i386__) || \
      defined(__x86_64) || defined(__x86_64__) || \
      defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
#  define GHASH_ASM_X86_OR_64

void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                     size_t len);

#  if defined(__i386) || defined(__i386__) || defined(_M_IX86)
#   define gcm_init_avx   gcm_init_clmul
#   define gcm_gmult_avx  gcm_gmult_clmul
#   define gcm_ghash_avx  gcm_ghash_clmul
#  else
void gcm_init_avx(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_avx(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_avx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                   size_t len);
#  endif

#  if defined(__i386) || defined(__i386__) || defined(_M_IX86)
#   define GHASH_ASM_X86
void gcm_gmult_4bit_mmx(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_mmx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                        size_t len);

void gcm_gmult_4bit_x86(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                        size_t len);
#  endif
# elif defined(__arm__) || defined(__arm) || defined(__aarch64__) || defined(_M_ARM64)
#  include "arm_arch.h"
#  if __ARM_MAX_ARCH__>=7
#   define GHASH_ASM_ARM
#   define PMULL_CAPABLE        (OPENSSL_armcap_P & ARMV8_PMULL)
#   if defined(__arm__) || defined(__arm)
#    define NEON_CAPABLE        (OPENSSL_armcap_P & ARMV7_NEON)
#   endif
void gcm_init_neon(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
void gcm_init_v8(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                  size_t len);
#  endif
# elif defined(__sparc__) || defined(__sparc)
#  include "crypto/sparc_arch.h"
#  define GHASH_ASM_SPARC
void gcm_init_vis3(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_vis3(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_vis3(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
# elif defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__POWERPC__) || defined(_ARCH_PPC))
#  include "crypto/ppc_arch.h"
#  define GHASH_ASM_PPC
void gcm_init_p8(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_p8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                  size_t len);
# elif defined(OPENSSL_CPUID_OBJ) && defined(__riscv) && __riscv_xlen == 64
#  include "crypto/riscv_arch.h"
#  define GHASH_ASM_RV64I
/* Zbc/Zbkc (scalar crypto with clmul) based routines. */
void gcm_init_rv64i_zbc(u128 Htable[16], const u64 Xi[2]);
void gcm_init_rv64i_zbc__zbb(u128 Htable[16], const u64 Xi[2]);
void gcm_init_rv64i_zbc__zbkb(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_rv64i_zbc(u64 Xi[2], const u128 Htable[16]);
void gcm_gmult_rv64i_zbc__zbkb(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_rv64i_zbc(u64 Xi[2], const u128 Htable[16],
                         const u8 *inp, size_t len);
void gcm_ghash_rv64i_zbc__zbkb(u64 Xi[2], const u128 Htable[16],
                               const u8 *inp, size_t len);
/* Zvkb/Zvbc (vector crypto with vclmul) based routines. */
void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
                               const u8 *inp, size_t len);
/* Zvkg (vector crypto with vgmul.vv and vghsh.vv). */
void gcm_init_rv64i_zvkg(u128 Htable[16], const u64 Xi[2]);
void gcm_init_rv64i_zvkg_zvkb(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_rv64i_zvkg(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_rv64i_zvkg(u64 Xi[2], const u128 Htable[16],
                          const u8 *inp, size_t len);
# endif
#endif

static void gcm_get_funcs(struct gcm_funcs_st *ctx)
{
    /* set defaults -- overridden below as needed */
    ctx->ginit = gcm_init_4bit;
#if !defined(GHASH_ASM)
    ctx->gmult = gcm_gmult_4bit;
#else
    ctx->gmult = NULL;
#endif
#if !defined(GHASH_ASM) && !defined(OPENSSL_SMALL_FOOTPRINT)
    ctx->ghash = gcm_ghash_4bit;
#else
    ctx->ghash = NULL;
#endif

#if defined(GHASH_ASM_X86_OR_64)
# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
    /* x86_64 */
    if (OPENSSL_ia32cap_P[1] & (1 << 1)) { /* check PCLMULQDQ bit */
        if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
            ctx->ginit = gcm_init_avx;
            ctx->gmult = gcm_gmult_avx;
            ctx->ghash = gcm_ghash_avx;
        } else {
            ctx->ginit = gcm_init_clmul;
            ctx->gmult = gcm_gmult_clmul;
            ctx->ghash = gcm_ghash_clmul;
        }
        return;
    }
# endif
# if defined(GHASH_ASM_X86)
    /* x86 only */
#  if defined(OPENSSL_IA32_SSE2)
    if (OPENSSL_ia32cap_P[0] & (1 << 25)) { /* check SSE bit */
        ctx->gmult = gcm_gmult_4bit_mmx;
        ctx->ghash = gcm_ghash_4bit_mmx;
        return;
    }
#  else
    if (OPENSSL_ia32cap_P[0] & (1 << 23)) { /* check MMX bit */
        ctx->gmult = gcm_gmult_4bit_mmx;
        ctx->ghash = gcm_ghash_4bit_mmx;
        return;
    }
#  endif
    ctx->gmult = gcm_gmult_4bit_x86;
    ctx->ghash = gcm_ghash_4bit_x86;
    return;
# endif
    /* x86_64 fallback defaults */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
    return;
#elif defined(GHASH_ASM_ARM)
    /* ARM defaults */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
# ifdef PMULL_CAPABLE
    if (PMULL_CAPABLE) {
        ctx->ginit = (gcm_init_fn)gcm_init_v8;
        ctx->gmult = gcm_gmult_v8;
        ctx->ghash = gcm_ghash_v8;
    }
# elif defined(NEON_CAPABLE)
    if (NEON_CAPABLE) {
        ctx->ginit = gcm_init_neon;
        ctx->gmult = gcm_gmult_neon;
        ctx->ghash = gcm_ghash_neon;
    }
# endif
    return;
#elif defined(GHASH_ASM_SPARC)
    /* SPARC defaults */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
    if (OPENSSL_sparcv9cap_P[0] & SPARCV9_VIS3) {
        ctx->ginit = gcm_init_vis3;
        ctx->gmult = gcm_gmult_vis3;
        ctx->ghash = gcm_ghash_vis3;
    }
    return;
#elif defined(GHASH_ASM_PPC)
    /* PowerPC does not define GHASH_ASM; defaults set above */
    if (OPENSSL_ppccap_P & PPC_CRYPTO207) {
        ctx->ginit = gcm_init_p8;
        ctx->gmult = gcm_gmult_p8;
        ctx->ghash = gcm_ghash_p8;
    }
    return;
#elif defined(GHASH_ASM_RV64I)
    /* RISCV defaults */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;

    if (RISCV_HAS_ZVKG() && riscv_vlen() >= 128) {
        if (RISCV_HAS_ZVKB())
            ctx->ginit = gcm_init_rv64i_zvkg_zvkb;
        else
            ctx->ginit = gcm_init_rv64i_zvkg;
        ctx->gmult = gcm_gmult_rv64i_zvkg;
        ctx->ghash = gcm_ghash_rv64i_zvkg;
    } else if (RISCV_HAS_ZVKB() && RISCV_HAS_ZVBC() && riscv_vlen() >= 128) {
        ctx->ginit = gcm_init_rv64i_zvkb_zvbc;
        ctx->gmult = gcm_gmult_rv64i_zvkb_zvbc;
        ctx->ghash = gcm_ghash_rv64i_zvkb_zvbc;
    } else if (RISCV_HAS_ZBC()) {
        if (RISCV_HAS_ZBKB()) {
            ctx->ginit = gcm_init_rv64i_zbc__zbkb;
            ctx->gmult = gcm_gmult_rv64i_zbc__zbkb;
            ctx->ghash = gcm_ghash_rv64i_zbc__zbkb;
        } else if (RISCV_HAS_ZBB()) {
            ctx->ginit = gcm_init_rv64i_zbc__zbb;
            ctx->gmult = gcm_gmult_rv64i_zbc;
            ctx->ghash = gcm_ghash_rv64i_zbc;
        } else {
            ctx->ginit = gcm_init_rv64i_zbc;
            ctx->gmult = gcm_gmult_rv64i_zbc;
            ctx->ghash = gcm_ghash_rv64i_zbc;
        }
    }
    return;
#elif defined(GHASH_ASM)
    /* all other architectures use the generic names */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
    return;
#endif
}

void ossl_gcm_init_4bit(u128 Htable[16], const u64 H[2])
{
    struct gcm_funcs_st funcs;

    gcm_get_funcs(&funcs);
    funcs.ginit(Htable, H);
}

void ossl_gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
{
    struct gcm_funcs_st funcs;

    gcm_get_funcs(&funcs);
    funcs.gmult(Xi, Htable);
}

void ossl_gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
                         const u8 *inp, size_t len)
{
    struct gcm_funcs_st funcs;
    u64 tmp[2];
    size_t i;

    gcm_get_funcs(&funcs);
    if (funcs.ghash != NULL) {
        funcs.ghash(Xi, Htable, inp, len);
    } else {
        /* Emulate ghash if needed */
        for (i = 0; i < len; i += 16) {
            memcpy(tmp, &inp[i], sizeof(tmp));
            Xi[0] ^= tmp[0];
            Xi[1] ^= tmp[1];
            funcs.gmult(Xi, Htable);
        }
    }
}

void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
{
    DECLARE_IS_ENDIAN;

    memset(ctx, 0, sizeof(*ctx));
    ctx->block = block;
    ctx->key = key;

    (*block) (ctx->H.c, ctx->H.c, key);

    if (IS_LITTLE_ENDIAN) {
        /* H is stored in host byte order */
#ifdef BSWAP8
        ctx->H.u[0] = BSWAP8(ctx->H.u[0]);
        ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
#else
        u8 *p = ctx->H.c;
        u64 hi, lo;
        hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
        lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
        ctx->H.u[0] = hi;
        ctx->H.u[1] = lo;
#endif
    }

    gcm_get_funcs(&ctx->funcs);
    ctx->funcs.ginit(ctx->Htable, ctx->H.u);
}

void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
                         size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int ctr;

    ctx->len.u[0] = 0;          /* AAD length */
    ctx->len.u[1] = 0;          /* message length */
    ctx->ares = 0;
    ctx->mres = 0;

    if (len == 12) {
        memcpy(ctx->Yi.c, iv, 12);
        ctx->Yi.c[12] = 0;
        ctx->Yi.c[13] = 0;
        ctx->Yi.c[14] = 0;
        ctx->Yi.c[15] = 1;
        ctr = 1;
    } else {
        size_t i;
        u64 len0 = len;

        /* Borrow ctx->Xi to calculate initial Yi */
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;

        while (len >= 16) {
            for (i = 0; i < 16; ++i)
                ctx->Xi.c[i] ^= iv[i];
            GCM_MUL(ctx);
            iv += 16;
            len -= 16;
        }
        if (len) {
            for (i = 0; i < len; ++i)
                ctx->Xi.c[i] ^= iv[i];
            GCM_MUL(ctx);
        }
        len0 <<= 3;
        if (IS_LITTLE_ENDIAN) {
#ifdef BSWAP8
            ctx->Xi.u[1] ^= BSWAP8(len0);
#else
            ctx->Xi.c[8] ^= (u8)(len0 >> 56);
            ctx->Xi.c[9] ^= (u8)(len0 >> 48);
            ctx->Xi.c[10] ^= (u8)(len0 >> 40);
            ctx->Xi.c[11] ^= (u8)(len0 >> 32);
            ctx->Xi.c[12] ^= (u8)(len0 >> 24);
            ctx->Xi.c[13] ^= (u8)(len0 >> 16);
            ctx->Xi.c[14] ^= (u8)(len0 >> 8);
            ctx->Xi.c[15] ^= (u8)(len0);
#endif
        } else {
            ctx->Xi.u[1] ^= len0;
        }

        GCM_MUL(ctx);

        if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
            ctr = BSWAP4(ctx->Xi.d[3]);
#else
            ctr = GETU32(ctx->Xi.c + 12);
#endif
        else
            ctr = ctx->Xi.d[3];

        /* Copy borrowed Xi to Yi */
        ctx->Yi.u[0] = ctx->Xi.u[0];
        ctx->Yi.u[1] = ctx->Xi.u[1];
    }

    ctx->Xi.u[0] = 0;
    ctx->Xi.u[1] = 0;

    (*ctx->block) (ctx->Yi.c, ctx->EK0.c, ctx->key);
    ++ctr;
    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctx->Yi.d[3] = BSWAP4(ctr);
#else
        PUTU32(ctx->Yi.c + 12, ctr);
#endif
    else
        ctx->Yi.d[3] = ctr;
}
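
/*
 * For reference, the IV handling above follows NIST SP 800-38D: with a
 * 96-bit IV the pre-counter block is J0 = IV || 0^31 || 1, while for any
 * other IV length the IV is zero-padded to a 128-bit boundary and run
 * through GHASH together with its 64-bit bit-length,
 *
 *     J0 = GHASH_H(IV || 0^(s+64) || [len(IV)]_64),
 *
 * which is exactly the Xi-borrowing computation in the else branch.
 */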

int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad,
                      size_t len)
{
    size_t i;
    unsigned int n;
    u64 alen = ctx->len.u[0];

    if (ctx->len.u[1])
        return -2;

    alen += len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->len.u[0] = alen;

    n = ctx->ares;
    if (n) {
        while (n && len) {
            ctx->Xi.c[n] ^= *(aad++);
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0)
            GCM_MUL(ctx);
        else {
            ctx->ares = n;
            return 0;
        }
    }
#ifdef GHASH
    if ((i = (len & (size_t)-16))) {
        GHASH(ctx, aad, i);
        aad += i;
        len -= i;
    }
#else
    while (len >= 16) {
        for (i = 0; i < 16; ++i)
            ctx->Xi.c[i] ^= aad[i];
        GCM_MUL(ctx);
        aad += 16;
        len -= 16;
    }
#endif
    if (len) {
        n = (unsigned int)len;
        for (i = 0; i < len; ++i)
            ctx->Xi.c[i] ^= aad[i];
    }

    ctx->ares = n;
    return 0;
}

int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    block128_f block = ctx->block;
    void *key = ctx->key;

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to encrypt finalizes GHASH(AAD) */
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
#else
        GCM_MUL(ctx);
#endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
#else
        ctr = GETU32(ctx->Yi.c + 12);
#endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (16 % sizeof(size_t) == 0) { /* always true actually */
        do {
            if (n) {
# if defined(GHASH)
                while (n && len) {
                    ctx->Xn[mres++] = *(out++) = *(in++) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GHASH(ctx, ctx->Xn, mres);
                    mres = 0;
                } else {
                    ctx->mres = mres;
                    return 0;
                }
# else
                while (n && len) {
                    ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GCM_MUL(ctx);
                    mres = 0;
                } else {
                    ctx->mres = n;
                    return 0;
                }
# endif
            }
# if defined(STRICT_ALIGNMENT)
            if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
                break;
# endif
# if defined(GHASH)
            if (len >= 16 && mres) {
                GHASH(ctx, ctx->Xn, mres);
                mres = 0;
            }
#  if defined(GHASH_CHUNK)
            while (len >= GHASH_CHUNK) {
                size_t j = GHASH_CHUNK;

                while (j) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#   else
                        PUTU32(ctx->Yi.c + 12, ctr);
#   endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    j -= 16;
                }
                GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
                len -= GHASH_CHUNK;
            }
#  endif
            if ((i = (len & (size_t)-16))) {
                size_t j = i;

                while (len >= 16) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                        PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    len -= 16;
                }
                GHASH(ctx, out - j, j);
            }
# else
            while (len >= 16) {
                size_t *out_t = (size_t *)out;
                const size_t *in_t = (const size_t *)in;

                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                    PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                else
                    ctx->Yi.d[3] = ctr;
                for (i = 0; i < 16 / sizeof(size_t); ++i)
                    ctx->Xi.t[i] ^= out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                GCM_MUL(ctx);
                out += 16;
                in += 16;
                len -= 16;
            }
# endif
            if (len) {
                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
# else
                    PUTU32(ctx->Yi.c + 12, ctr);
# endif
                else
                    ctx->Yi.d[3] = ctr;
# if defined(GHASH)
                while (len--) {
                    ctx->Xn[mres++] = out[n] = in[n] ^ ctx->EKi.c[n];
                    ++n;
                }
# else
                while (len--) {
                    ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
                    ++n;
                }
                mres = n;
# endif
            }

            ctx->mres = mres;
            return 0;
        } while (0);
    }
#endif
    for (i = 0; i < len; ++i) {
        if (n == 0) {
            (*block) (ctx->Yi.c, ctx->EKi.c, key);
            ++ctr;
            if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
                ctx->Yi.d[3] = BSWAP4(ctr);
#else
                PUTU32(ctx->Yi.c + 12, ctr);
#endif
            else
                ctx->Yi.d[3] = ctr;
        }
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        ctx->Xn[mres++] = out[i] = in[i] ^ ctx->EKi.c[n];
        n = (n + 1) % 16;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx,ctx->Xn,sizeof(ctx->Xn));
            mres = 0;
        }
#else
        ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
        mres = n = (n + 1) % 16;
        if (n == 0)
            GCM_MUL(ctx);
#endif
    }

    ctx->mres = mres;
    return 0;
}
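
/*
 * A note on the bound enforced above: SP 800-38D caps the plaintext per
 * invocation at 2^39 - 256 bits, i.e. (2^39 - 256)/8 = 2^36 - 32 bytes,
 * which is precisely the ((U64(1) << 36) - 32) check at the top of this
 * function and of the other en/decrypt entry points.
 */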

int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    block128_f block = ctx->block;
    void *key = ctx->key;

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to decrypt finalizes GHASH(AAD) */
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
#else
        GCM_MUL(ctx);
#endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
#else
        ctr = GETU32(ctx->Yi.c + 12);
#endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (16 % sizeof(size_t) == 0) { /* always true actually */
        do {
            if (n) {
# if defined(GHASH)
                while (n && len) {
                    *(out++) = (ctx->Xn[mres++] = *(in++)) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GHASH(ctx, ctx->Xn, mres);
                    mres = 0;
                } else {
                    ctx->mres = mres;
                    return 0;
                }
# else
                while (n && len) {
                    u8 c = *(in++);
                    *(out++) = c ^ ctx->EKi.c[n];
                    ctx->Xi.c[n] ^= c;
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GCM_MUL(ctx);
                    mres = 0;
                } else {
                    ctx->mres = n;
                    return 0;
                }
# endif
            }
# if defined(STRICT_ALIGNMENT)
            if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
                break;
# endif
# if defined(GHASH)
            if (len >= 16 && mres) {
                GHASH(ctx, ctx->Xn, mres);
                mres = 0;
            }
#  if defined(GHASH_CHUNK)
            while (len >= GHASH_CHUNK) {
                size_t j = GHASH_CHUNK;

                GHASH(ctx, in, GHASH_CHUNK);
                while (j) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#   else
                        PUTU32(ctx->Yi.c + 12, ctr);
#   endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    j -= 16;
                }
                len -= GHASH_CHUNK;
            }
#  endif
            if ((i = (len & (size_t)-16))) {
                GHASH(ctx, in, i);
                while (len >= 16) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                        PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    len -= 16;
                }
            }
# else
            while (len >= 16) {
                size_t *out_t = (size_t *)out;
                const size_t *in_t = (const size_t *)in;

                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                    PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                else
                    ctx->Yi.d[3] = ctr;
                for (i = 0; i < 16 / sizeof(size_t); ++i) {
                    size_t c = in_t[i];
                    out_t[i] = c ^ ctx->EKi.t[i];
                    ctx->Xi.t[i] ^= c;
                }
                GCM_MUL(ctx);
                out += 16;
                in += 16;
                len -= 16;
            }
# endif
            if (len) {
                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
# else
                    PUTU32(ctx->Yi.c + 12, ctr);
# endif
                else
                    ctx->Yi.d[3] = ctr;
# if defined(GHASH)
                while (len--) {
                    out[n] = (ctx->Xn[mres++] = in[n]) ^ ctx->EKi.c[n];
                    ++n;
                }
# else
                while (len--) {
                    u8 c = in[n];
                    ctx->Xi.c[n] ^= c;
                    out[n] = c ^ ctx->EKi.c[n];
                    ++n;
                }
                mres = n;
# endif
            }

            ctx->mres = mres;
            return 0;
        } while (0);
    }
#endif
    for (i = 0; i < len; ++i) {
        u8 c;
        if (n == 0) {
            (*block) (ctx->Yi.c, ctx->EKi.c, key);
            ++ctr;
            if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
                ctx->Yi.d[3] = BSWAP4(ctr);
#else
                PUTU32(ctx->Yi.c + 12, ctr);
#endif
            else
                ctx->Yi.d[3] = ctr;
        }
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        out[i] = (ctx->Xn[mres++] = c = in[i]) ^ ctx->EKi.c[n];
        n = (n + 1) % 16;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx,ctx->Xn,sizeof(ctx->Xn));
            mres = 0;
        }
#else
        c = in[i];
        out[i] = c ^ ctx->EKi.c[n];
        ctx->Xi.c[n] ^= c;
        mres = n = (n + 1) % 16;
        if (n == 0)
            GCM_MUL(ctx);
#endif
    }

    ctx->mres = mres;
    return 0;
}

int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
                                const unsigned char *in, unsigned char *out,
                                size_t len, ctr128_f stream)
{
#if defined(OPENSSL_SMALL_FOOTPRINT)
    return CRYPTO_gcm128_encrypt(ctx, in, out, len);
#else
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    void *key = ctx->key;

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to encrypt finalizes GHASH(AAD) */
# if defined(GHASH)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
# else
        GCM_MUL(ctx);
# endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
# else
        ctr = GETU32(ctx->Yi.c + 12);
# endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
    if (n) {
# if defined(GHASH)
        while (n && len) {
            ctx->Xn[mres++] = *(out++) = *(in++) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        } else {
            ctx->mres = mres;
            return 0;
        }
# else
        while (n && len) {
            ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GCM_MUL(ctx);
            mres = 0;
        } else {
            ctx->mres = n;
            return 0;
        }
# endif
    }
# if defined(GHASH)
    if (len >= 16 && mres) {
        GHASH(ctx, ctx->Xn, mres);
        mres = 0;
    }
#  if defined(GHASH_CHUNK)
    while (len >= GHASH_CHUNK) {
        (*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
        ctr += GHASH_CHUNK / 16;
        if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
#   else
            PUTU32(ctx->Yi.c + 12, ctr);
#   endif
        else
            ctx->Yi.d[3] = ctr;
        GHASH(ctx, out, GHASH_CHUNK);
        out += GHASH_CHUNK;
        in += GHASH_CHUNK;
        len -= GHASH_CHUNK;
    }
#  endif
# endif
    if ((i = (len & (size_t)-16))) {
        size_t j = i / 16;

        (*stream) (in, out, j, key, ctx->Yi.c);
        ctr += (unsigned int)j;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        in += i;
        len -= i;
# if defined(GHASH)
        GHASH(ctx, out, i);
        out += i;
# else
        while (j--) {
            size_t k;
            for (k = 0; k < 16; ++k)
                ctx->Xi.c[k] ^= out[k];
            GCM_MUL(ctx);
            out += 16;
        }
# endif
    }
    if (len) {
        (*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        while (len--) {
# if defined(GHASH)
            ctx->Xn[mres++] = out[n] = in[n] ^ ctx->EKi.c[n];
# else
            ctx->Xi.c[mres++] ^= out[n] = in[n] ^ ctx->EKi.c[n];
# endif
            ++n;
        }
    }

    ctx->mres = mres;
    return 0;
#endif
}

int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
                                const unsigned char *in, unsigned char *out,
                                size_t len, ctr128_f stream)
{
#if defined(OPENSSL_SMALL_FOOTPRINT)
    return CRYPTO_gcm128_decrypt(ctx, in, out, len);
#else
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    void *key = ctx->key;

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to decrypt finalizes GHASH(AAD) */
# if defined(GHASH)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
# else
        GCM_MUL(ctx);
# endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
# else
        ctr = GETU32(ctx->Yi.c + 12);
# endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
    if (n) {
# if defined(GHASH)
        while (n && len) {
            *(out++) = (ctx->Xn[mres++] = *(in++)) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        } else {
            ctx->mres = mres;
            return 0;
        }
# else
        while (n && len) {
            u8 c = *(in++);
            *(out++) = c ^ ctx->EKi.c[n];
            ctx->Xi.c[n] ^= c;
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GCM_MUL(ctx);
            mres = 0;
        } else {
            ctx->mres = n;
            return 0;
        }
# endif
    }
# if defined(GHASH)
    if (len >= 16 && mres) {
        GHASH(ctx, ctx->Xn, mres);
        mres = 0;
    }
#  if defined(GHASH_CHUNK)
    while (len >= GHASH_CHUNK) {
        GHASH(ctx, in, GHASH_CHUNK);
        (*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
        ctr += GHASH_CHUNK / 16;
        if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
#   else
            PUTU32(ctx->Yi.c + 12, ctr);
#   endif
        else
            ctx->Yi.d[3] = ctr;
        out += GHASH_CHUNK;
        in += GHASH_CHUNK;
        len -= GHASH_CHUNK;
    }
#  endif
# endif
    if ((i = (len & (size_t)-16))) {
        size_t j = i / 16;

# if defined(GHASH)
        GHASH(ctx, in, i);
# else
        while (j--) {
            size_t k;
            for (k = 0; k < 16; ++k)
                ctx->Xi.c[k] ^= in[k];
            GCM_MUL(ctx);
            in += 16;
        }
        j = i / 16;
        in -= i;
# endif
        (*stream) (in, out, j, key, ctx->Yi.c);
        ctr += (unsigned int)j;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        out += i;
        in += i;
        len -= i;
    }
    if (len) {
        (*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        while (len--) {
# if defined(GHASH)
            out[n] = (ctx->Xn[mres++] = in[n]) ^ ctx->EKi.c[n];
# else
            u8 c = in[n];
            ctx->Xi.c[mres++] ^= c;
            out[n] = c ^ ctx->EKi.c[n];
# endif
            ++n;
        }
    }

    ctx->mres = mres;
    return 0;
#endif
}

int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
                         size_t len)
{
    DECLARE_IS_ENDIAN;
    u64 alen = ctx->len.u[0] << 3;
    u64 clen = ctx->len.u[1] << 3;

#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    u128 bitlen;
    unsigned int mres = ctx->mres;

    if (mres) {
        unsigned blocks = (mres + 15) & -16;

        memset(ctx->Xn + mres, 0, blocks - mres);
        mres = blocks;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        }
    } else if (ctx->ares) {
        GCM_MUL(ctx);
    }
#else
    if (ctx->mres || ctx->ares)
        GCM_MUL(ctx);
#endif

    if (IS_LITTLE_ENDIAN) {
#ifdef BSWAP8
        alen = BSWAP8(alen);
        clen = BSWAP8(clen);
#else
        u8 *p = ctx->len.c;

        ctx->len.u[0] = alen;
        ctx->len.u[1] = clen;

        alen = (u64)GETU32(p) << 32 | GETU32(p + 4);
        clen = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
#endif
    }

#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    bitlen.hi = alen;
    bitlen.lo = clen;
    memcpy(ctx->Xn + mres, &bitlen, sizeof(bitlen));
    mres += sizeof(bitlen);
    GHASH(ctx, ctx->Xn, mres);
#else
    ctx->Xi.u[0] ^= alen;
    ctx->Xi.u[1] ^= clen;
    GCM_MUL(ctx);
#endif

    ctx->Xi.u[0] ^= ctx->EK0.u[0];
    ctx->Xi.u[1] ^= ctx->EK0.u[1];

    if (tag && len <= sizeof(ctx->Xi))
        return CRYPTO_memcmp(ctx->Xi.c, tag, len);
    else
        return -1;
}

void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len)
{
    CRYPTO_gcm128_finish(ctx, NULL, 0);
    memcpy(tag, ctx->Xi.c,
           len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
}

GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block)
{
    GCM128_CONTEXT *ret;

    if ((ret = OPENSSL_malloc(sizeof(*ret))) != NULL)
        CRYPTO_gcm128_init(ret, key, block);

    return ret;
}

void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx)
{
    OPENSSL_clear_free(ctx, sizeof(*ctx));
}
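
/*
 * A minimal usage sketch of the one-shot API above (illustrative only, not
 * compiled here).  It assumes an AES key schedule -- AES_set_encrypt_key()
 * and AES_encrypt() from <openssl/aes.h> -- as the underlying block
 * cipher; any block128_f-compatible 128-bit block cipher would do, and
 * gcm128_demo is a hypothetical name, not part of this file's API.
 */
#if 0
# include <openssl/aes.h>

static int gcm128_demo(const unsigned char key[16],
                       const unsigned char iv[12],
                       const unsigned char *aad, size_t aad_len,
                       const unsigned char *pt, unsigned char *ct,
                       size_t len, unsigned char tag[16])
{
    AES_KEY ks;
    GCM128_CONTEXT *gcm;

    AES_set_encrypt_key(key, 128, &ks);
    if ((gcm = CRYPTO_gcm128_new(&ks, (block128_f)AES_encrypt)) == NULL)
        return -1;
    CRYPTO_gcm128_setiv(gcm, iv, 12);           /* 96-bit IV fast path  */
    if (CRYPTO_gcm128_aad(gcm, aad, aad_len) == 0
            && CRYPTO_gcm128_encrypt(gcm, pt, ct, len) == 0)
        CRYPTO_gcm128_tag(gcm, tag, 16);        /* emit the 128-bit tag */
    CRYPTO_gcm128_release(gcm);
    return 0;
}
#endif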