/*
 * Copyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * All low level APIs are deprecated for public use, but still ok for internal
 * use where we're using them to implement the higher level EVP interface, as is
 * the case here.
 */
#include "internal/deprecated.h"

#include "cipher_aes_cbc_hmac_sha.h"
#if !defined(AES_CBC_HMAC_SHA_CAPABLE) || !defined(AESNI_CAPABLE)
int ossl_cipher_capable_aes_cbc_hmac_sha1(void)
{
    return 0;
}

const PROV_CIPHER_HW_AES_HMAC_SHA *ossl_prov_cipher_hw_aes_cbc_hmac_sha1(void)
{
    return NULL;
}

#else
# include <openssl/rand.h>
# include "crypto/evp.h"
# include "internal/constant_time.h"

void sha1_block_data_order(void *c, const void *p, size_t len);
void aesni_cbc_sha1_enc(const void *inp, void *out, size_t blocks,
                        const AES_KEY *key, unsigned char iv[16],
                        SHA_CTX *ctx, const void *in0);
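/*
 * Note: aesni_cbc_sha1_enc() is the "stitched" assembly routine. In a single
 * pass over |blocks| 64-byte chunks it AES-CBC-encrypts the data at |inp|
 * into |out| while running the SHA-1 compression function over the data at
 * |in0|, so the two computations overlap in the pipeline instead of running
 * back to back.
 */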
int ossl_cipher_capable_aes_cbc_hmac_sha1(void)
{
    return AESNI_CBC_HMAC_SHA_CAPABLE;
}
static int aesni_cbc_hmac_sha1_init_key(PROV_CIPHER_CTX *vctx,
                                        const unsigned char *key, size_t keylen)
{
    int ret;
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;

    if (ctx->base.enc)
        ret = aesni_set_encrypt_key(key, keylen * 8, &ctx->ks);
    else
        ret = aesni_set_decrypt_key(key, keylen * 8, &ctx->ks);

    SHA1_Init(&sctx->head);     /* handy when benchmarking */
    sctx->tail = sctx->head;
    sctx->md = sctx->head;

    ctx->payload_length = NO_PAYLOAD_LENGTH;

    vctx->removetlspad = 1;
    vctx->removetlsfixed = SHA_DIGEST_LENGTH + AES_BLOCK_SIZE;

    return ret < 0 ? 0 : 1;
}
static void sha1_update(SHA_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA_CBLOCK - res;
        if (len < res)
            res = len;
        SHA1_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA_CBLOCK;
    len -= res;

    if (len) {
        sha1_block_data_order(c, ptr, len / SHA_CBLOCK);

        ptr += len;

        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA1_Update(c, ptr, res);
}
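/*
 * Note: this local sha1_update() deliberately bypasses SHA1_Update() for the
 * bulk of the data. Whole 64-byte blocks are fed straight to
 * sha1_block_data_order() and the Nh/Nl bit counters are maintained by hand,
 * leaving SHA1_Update() to handle only the unaligned head and tail.
 */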
# if !defined(OPENSSL_NO_MULTIBLOCK)

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8];
} SHA1_MB_CTX;

typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void sha1_multi_block(SHA1_MB_CTX *, const HASH_DESC *, int);
void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);
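/*
 * Note: sha1_multi_block() runs the SHA-1 compression function over 4 or 8
 * independent data streams at once (one stream per SIMD lane, the 8-way form
 * requiring AVX2), and aesni_multi_cbc_encrypt() interleaves the matching
 * AES-CBC encryptions. tls1_multi_block_encrypt() below uses them to turn one
 * large application write into 4 or 8 TLS records processed in parallel.
 */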
static size_t tls1_multi_block_encrypt(void *vctx,
                                       unsigned char *out,
                                       const unsigned char *inp,
                                       size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA1_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA1_MB_CTX *mctx;
    unsigned int frag, last, packlen, i;
    unsigned int x4 = 4 * n4x, minblocks, processed = 0;
    size_t ret = 0;
    u8 *IVs;
#  if defined(BSWAP8)
    u64 seqnum;
#  endif
    /* ask for IVs in bulk */
    if (RAND_bytes_ex(ctx->base.libctx, (IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    mctx = (SHA1_MB_CTX *) (storage + 32 - ((size_t)storage % 32)); /* align */

    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 20 + 16) & -16);
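    /*
     * Note: packlen is the ciphertext budget for one record: 5 bytes of TLS
     * record header, 16 bytes of explicit IV, and the fragment plus 20-byte
     * SHA-1 MAC rounded up to the next AES block boundary (CBC padding
     * always adds at least one byte).
     */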
    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }
#  if defined(BSWAP8)
    memcpy(blocks[0].c, sctx->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#  endif
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#  if !defined(BSWAP8)
        unsigned int carry, j;
#  endif

        mctx->A[i] = sctx->md.h0;
        mctx->B[i] = sctx->md.h1;
        mctx->C[i] = sctx->md.h2;
        mctx->D[i] = sctx->md.h3;
        mctx->E[i] = sctx->md.h4;

        /* fix seqnum */
#  if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#  else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)sctx->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#  endif
        blocks[i].c[8] = ((u8 *)sctx->md.data)[8];
        blocks[i].c[9] = ((u8 *)sctx->md.data)[9];
        blocks[i].c[10] = ((u8 *)sctx->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }
    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha1_multi_block(mctx, edges, n4x);
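    /*
     * Note: each per-lane "edge" block holds the 13-byte TLS MAC header
     * (8-byte sequence number, 1-byte type, 2-byte version, 2-byte length)
     * followed by the first 64 - 13 = 51 payload bytes, i.e. exactly one
     * SHA-1 block per record.
     */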
    /* hash bulk inputs */
#  define MAXCHUNKSIZE 2048
#  if     MAXCHUNKSIZE % 64
#   error "MAXCHUNKSIZE is not divisible by 64"
#  elif   MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha1_multi_block(mctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#  endif
#  undef MAXCHUNKSIZE
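    /*
     * Note: inside the chunked loop above each lane's CBC IV is refreshed
     * from the last ciphertext block it just produced (ciph_d[i].out - 16),
     * so every record's CBC chain continues seamlessly across chunks. The
     * whole blocks that remain after the chunked pass are hashed below,
     * while their encryption is deferred to the final
     * aesni_multi_cbc_encrypt() call.
     */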
    sha1_multi_block(mctx, hash_d, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
            off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#  ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 60, len);
#  endif
            edges[i].blocks = 1;
        } else {
#  ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#  else
            PUTU32(blocks[i].c + 124, len);
#  endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }
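    /*
     * Note: this is standard SHA-1 (Merkle-Damgard) padding done by hand for
     * each lane: a 0x80 terminator right after the remainder, zero fill, and
     * the message length in bits stored big-endian in the last 32-bit word
     * of the final block (the upper half of the 64-bit length field stays
     * zero thanks to the memset above). If the remainder plus the 9 bytes of
     * terminator and length do not fit in one block, the padding spills into
     * a second block, hence edges[i].blocks of 1 or 2.
     */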
    /* hash input tails and finalize */
    sha1_multi_block(mctx, edges, n4x);
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#  ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(mctx->A[i]);
        mctx->A[i] = sctx->tail.h0;
        blocks[i].d[1] = BSWAP4(mctx->B[i]);
        mctx->B[i] = sctx->tail.h1;
        blocks[i].d[2] = BSWAP4(mctx->C[i]);
        mctx->C[i] = sctx->tail.h2;
        blocks[i].d[3] = BSWAP4(mctx->D[i]);
        mctx->D[i] = sctx->tail.h3;
        blocks[i].d[4] = BSWAP4(mctx->E[i]);
        mctx->E[i] = sctx->tail.h4;
        blocks[i].c[20] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 20) * 8);
#  else
        PUTU32(blocks[i].c + 0, mctx->A[i]);
        mctx->A[i] = sctx->tail.h0;
        PUTU32(blocks[i].c + 4, mctx->B[i]);
        mctx->B[i] = sctx->tail.h1;
        PUTU32(blocks[i].c + 8, mctx->C[i]);
        mctx->C[i] = sctx->tail.h2;
        PUTU32(blocks[i].c + 12, mctx->D[i]);
        mctx->D[i] = sctx->tail.h3;
        PUTU32(blocks[i].c + 16, mctx->E[i]);
        mctx->E[i] = sctx->tail.h4;
        blocks[i].c[20] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 20) * 8);
#  endif
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }
    /* finalize MACs */
    sha1_multi_block(mctx, edges, n4x);
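    /*
     * Note: this last pass is the HMAC outer hash. The loop above stored
     * each lane's inner digest big-endian in blocks[i], padded it as a
     * 64 + 20 byte message, and reloaded the lanes with the precomputed
     * key ^ opad state (sctx->tail), so mctx->A..E now hold the final
     * 20-byte MACs.
     */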
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, mctx->A[i]);
        PUTU32(out + 4, mctx->B[i]);
        PUTU32(out + 8, mctx->C[i]);
        PUTU32(out + 12, mctx->D[i]);
        PUTU32(out + 16, mctx->E[i]);
        out += 20;
        len += 20;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)sctx->md.data)[8];
        out0[1] = ((u8 *)sctx->md.data)[9];
        out0[2] = ((u8 *)sctx->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &ctx->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(mctx, sizeof(*mctx));

    ctx->multiblock_encrypt_len = ret;
    return ret;
}
# endif /* OPENSSL_NO_MULTIBLOCK */
static int aesni_cbc_hmac_sha1_cipher(PROV_CIPHER_CTX *vctx,
                                      unsigned char *out,
                                      const unsigned char *in, size_t len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int l;
    size_t plen = ctx->payload_length;
    size_t iv = 0; /* explicit IV in TLS 1.1 and later */
    size_t aes_off = 0, blocks;
    size_t sha_off = SHA_CBLOCK - sctx->md.num;

    ctx->payload_length = NO_PAYLOAD_LENGTH;
    if (len % AES_BLOCK_SIZE)
        return 0;

    if (ctx->base.enc) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (ctx->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;

        if (plen > (sha_off + iv)
            && (blocks = (plen - (sha_off + iv)) / SHA_CBLOCK)) {
            sha1_update(&sctx->md, in + iv, sha_off);

            aesni_cbc_sha1_enc(in, out, blocks, &ctx->ks, ctx->base.iv,
                               &sctx->md, in + iv + sha_off);
            blocks *= SHA_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            sctx->md.Nh += blocks >> 29;
            sctx->md.Nl += blocks <<= 3;
            if (sctx->md.Nl < (unsigned int)blocks)
                sctx->md.Nh++;
        } else {
            sha_off = 0;
        }
        sha_off += iv;
        sha1_update(&sctx->md, in + sha_off, plen - sha_off);
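        /*
         * Note: the payload has now been hashed in full, by the stitched
         * aesni_cbc_sha1_enc() pass above (when it was worth taking) plus
         * the plain sha1_update() call; the first aes_off bytes of
         * ciphertext are already written, and only the MAC and padding
         * remain to be appended and encrypted below.
         */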
        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA1_Final(out + plen, &sctx->md);
            sctx->md = sctx->tail;
            sha1_update(&sctx->md, out + plen, SHA_DIGEST_LENGTH);
            SHA1_Final(out + plen, &sctx->md);

            /* pad the payload|hmac */
            plen += SHA_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
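            /*
             * Note: the sequence above is the two-stage HMAC finish: the
             * first SHA1_Final() writes the inner digest at out + plen,
             * sctx->md is reloaded with the key ^ opad state (sctx->tail),
             * the inner digest is rehashed and the second SHA1_Final()
             * overwrites it with the final HMAC. The padding bytes that
             * follow all carry the value len - plen - 1, as TLS CBC padding
             * requires, and payload|MAC|padding are encrypted in one pass.
             */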
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &ctx->ks, ctx->base.iv, 1);
        }
    } else {
        union {
            unsigned int u[SHA_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[32 + SHA_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 31) & ((size_t)0 - 32));
        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA_CBLOCK];
            } *data = (void *)sctx->md.data;

            if ((ctx->aux.tls_aad[plen - 4] << 8 | ctx->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION) {
                if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1))
                    return 0;

                /* omit explicit iv */
                memcpy(ctx->base.iv, in, AES_BLOCK_SIZE);

                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
                len -= AES_BLOCK_SIZE;
            } else if (len < (SHA_DIGEST_LENGTH + 1))
                return 0;

            /* decrypt HMAC|padding at once */
            aesni_cbc_encrypt(in, out, len, &ctx->ks, ctx->base.iv, 0);

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;
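            /*
             * Note: the |= and &= pair above clamps maxpad to
             * min(len - 21, 255) without a secret-dependent branch: when
             * len - 21 exceeds 255 the shifted borrow sets all of the low
             * eight bits and the mask then reduces the value to 255, the
             * largest padding length a one-byte TLS padding field can carry.
             */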
            mask = constant_time_ge(maxpad, pad);
            ret &= mask;
            /*
             * If pad is invalid then we will fail the above test but we must
             * continue anyway because we are in constant time code. However,
             * we'll use the maxpad value instead of the supplied pad to make
             * sure we perform well defined pointer arithmetic.
             */
            pad = constant_time_select(mask, pad, maxpad);

            inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);

            ctx->aux.tls_aad[plen - 2] = inp_len >> 8;
            ctx->aux.tls_aad[plen - 1] = inp_len;
            /* calculate HMAC */
            sctx->md = sctx->head;
            sha1_update(&sctx->md, ctx->aux.tls_aad, plen);

            /* code containing lucky-13 fix */
            len -= SHA_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA_CBLOCK)) {
                j = (len - (256 + SHA_CBLOCK)) & (0 - SHA_CBLOCK);
                j += SHA_CBLOCK - sctx->md.num;
                sha1_update(&sctx->md, out, j);
                out += j;
                len -= j;
            }
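            /*
             * Note: the bulk hashed above is guaranteed to be payload
             * whatever the (secret) padding length turns out to be; only the
             * last 256 + SHA_CBLOCK bytes can depend on it, so everything
             * before that can be hashed at full speed. The data-dependent
             * tail is handled by the constant-time loop below.
             */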
            /* but pretend as if we hashed padded payload */
            bitlen = sctx->md.Nl + (inp_len << 3); /* at most 18 bits */
# ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
# else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
# endif

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;
            for (res = sctx->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h0 & mask;
                pmac->u[1] |= sctx->md.h1 & mask;
                pmac->u[2] |= sctx->md.h2 & mask;
                pmac->u[3] |= sctx->md.h3 & mask;
                pmac->u[4] |= sctx->md.h4 & mask;
                res = 0;
            }
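            /*
             * Note on the loop above: the first mask keeps ciphertext bytes
             * only while j < inp_len, and the second term plants the 0x80
             * terminator exactly at j == inp_len, so the data being hashed
             * looks like the correctly padded payload no matter where the
             * payload really ends. Every full 64-byte block is compressed
             * unconditionally; the bit length is merged into the last word
             * of any block that could already contain the end of the padded
             * message, and the resulting h0..h4 are folded into pmac only
             * for the single block that actually terminates it, all decided
             * by masks rather than branches.
             */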
            for (i = res; i < SHA_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&sctx->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= sctx->md.h0 & mask;
                pmac->u[1] |= sctx->md.h1 & mask;
                pmac->u[2] |= sctx->md.h2 & mask;
                pmac->u[3] |= sctx->md.h3 & mask;
                pmac->u[4] |= sctx->md.h4 & mask;

                memset(data, 0, SHA_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha1_block_data_order(&sctx->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= sctx->md.h0 & mask;
            pmac->u[1] |= sctx->md.h1 & mask;
            pmac->u[2] |= sctx->md.h2 & mask;
            pmac->u[3] |= sctx->md.h3 & mask;
            pmac->u[4] |= sctx->md.h4 & mask;
# ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
# else
            for (i = 0; i < 5; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
# endif
            len += SHA_DIGEST_LENGTH;
            sctx->md = sctx->tail;
            sha1_update(&sctx->md, pmac->c, SHA_DIGEST_LENGTH);
            SHA1_Final(pmac->c, &sctx->md);
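            /*
             * Note: the block below is the constant-time MAC and padding
             * check. It scans a fixed window of maxpad + SHA_DIGEST_LENGTH
             * bytes ending just before the final padding-length byte, so the
             * amount of work does not depend on the secret padding length;
             * masks decide, per byte, whether it is compared against the
             * computed MAC (pmac->c), against the expected padding value, or
             * ignored as payload, and any mismatch is accumulated into res.
             */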
            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
            /* version of code with lucky-13 fix */
            {
                unsigned char *p = out + len - 1 - maxpad - SHA_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                maxpad += SHA_DIGEST_LENGTH;
                for (res = 0, i = 0, j = 0; j < maxpad; j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA_DIGEST_LENGTH)) >> (sizeof(int) *
                                                                 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }
                maxpad -= SHA_DIGEST_LENGTH;

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
            return ret;
        } else {
            /* decrypt HMAC|padding at once */
            aesni_cbc_encrypt(in, out, len, &ctx->ks, ctx->base.iv, 0);
            sha1_update(&sctx->md, out, len);
        }
    }

    return 1;
}
/* EVP_CTRL_AEAD_SET_MAC_KEY */
static void aesni_cbc_hmac_sha1_set_mac_key(void *vctx,
                                            const unsigned char *mac, size_t len)
{
    PROV_AES_HMAC_SHA1_CTX *ctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int i;
    unsigned char hmac_key[64];

    memset(hmac_key, 0, sizeof(hmac_key));

    if (len > (int)sizeof(hmac_key)) {
        SHA1_Init(&ctx->head);
        sha1_update(&ctx->head, mac, len);
        SHA1_Final(hmac_key, &ctx->head);
    } else {
        memcpy(hmac_key, mac, len);
    }

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36; /* ipad */
    SHA1_Init(&ctx->head);
    sha1_update(&ctx->head, hmac_key, sizeof(hmac_key));

    for (i = 0; i < sizeof(hmac_key); i++)
        hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
    SHA1_Init(&ctx->tail);
    sha1_update(&ctx->tail, hmac_key, sizeof(hmac_key));

    OPENSSL_cleanse(hmac_key, sizeof(hmac_key));
}
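/*
 * Note: this is the usual HMAC key schedule done once per key. Afterwards
 * ctx->head holds the SHA-1 state with key ^ ipad already absorbed and
 * ctx->tail the state with key ^ opad, so per-record HMACs can resume from
 * these midpoint states instead of re-hashing the key block every time.
 */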
/* EVP_CTRL_AEAD_TLS1_AAD */
static int aesni_cbc_hmac_sha1_set_tls1_aad(void *vctx,
                                            unsigned char *aad_rec, int aad_len)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned char *p = aad_rec;
    unsigned int len;

    if (aad_len != EVP_AEAD_TLS1_AAD_LEN)
        return -1;

    len = p[aad_len - 2] << 8 | p[aad_len - 1];

    if (ctx->base.enc) {
        ctx->payload_length = len;
        if ((ctx->aux.tls_ver =
             p[aad_len - 4] << 8 | p[aad_len - 3]) >= TLS1_1_VERSION) {
            if (len < AES_BLOCK_SIZE)
                return 0;
            len -= AES_BLOCK_SIZE;
            p[aad_len - 2] = len >> 8;
            p[aad_len - 1] = len;
        }
        sctx->md = sctx->head;
        sha1_update(&sctx->md, p, aad_len);
        ctx->tls_aad_pad = (int)(((len + SHA_DIGEST_LENGTH +
                                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                                 - len);
        return 1;
    }

    memcpy(ctx->aux.tls_aad, aad_rec, aad_len);
    ctx->payload_length = aad_len;
    ctx->tls_aad_pad = SHA_DIGEST_LENGTH;

    return 1;
}
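/*
 * Note: on the encrypt side the 13-byte AAD (sequence number, type, version,
 * length) is hashed immediately on top of the key ^ ipad state, and
 * tls_aad_pad reports how much the record will grow. For example, with
 * len = 100 after any explicit-IV adjustment, (100 + 20 + 16) & -16 = 128,
 * so 20 bytes of MAC plus 8 bytes of CBC padding are added. On the decrypt
 * side the AAD is only stashed in ctx->aux.tls_aad, because the true payload
 * length is not known until after decryption.
 */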
# if !defined(OPENSSL_NO_MULTIBLOCK)

/* EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE */
static int aesni_cbc_hmac_sha1_tls1_multiblock_max_bufsize(void *vctx)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;

    OPENSSL_assert(ctx->multiblock_max_send_fragment != 0);
    return (int)(5 + 16
                 + (((int)ctx->multiblock_max_send_fragment + 20 + 16) & -16));
}
/* EVP_CTRL_TLS1_1_MULTIBLOCK_AAD */
static int aesni_cbc_hmac_sha1_tls1_multiblock_aad(
    void *vctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    PROV_AES_HMAC_SHA_CTX *ctx = (PROV_AES_HMAC_SHA_CTX *)vctx;
    PROV_AES_HMAC_SHA1_CTX *sctx = (PROV_AES_HMAC_SHA1_CTX *)vctx;
    unsigned int n4x = 1, x4;
    unsigned int frag, last, packlen, inp_len;

    inp_len = param->inp[11] << 8 | param->inp[12];
    ctx->multiblock_interleave = param->interleave;

    if (ctx->base.enc) {
        if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
            return -1;

        if (inp_len) {
            if (inp_len < 4096)
                return 0;       /* too short */

            if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                n4x = 2;        /* AVX2 */
        } else if ((n4x = param->interleave / 4) && n4x <= 2)
            inp_len = param->len;
        else
            return -1;

        sctx->md = sctx->head;
        sha1_update(&sctx->md, param->inp, 13);

        x4 = 4 * n4x;
        n4x += 1;

        frag = inp_len >> n4x;
        last = inp_len + frag - (frag << n4x);
        if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
            frag++;
            last -= x4 - 1;
        }

        packlen = 5 + 16 + ((frag + 20 + 16) & -16);
        packlen = (packlen << n4x) - packlen;
        packlen += 5 + 16 + ((last + 20 + 16) & -16);

        param->interleave = x4;
        /* The returned values used by get need to be stored */
        ctx->multiblock_interleave = x4;
        ctx->multiblock_aad_packlen = packlen;
        return 1;
    }
    return -1;                  /* not yet */
}
/* EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT */
static int aesni_cbc_hmac_sha1_tls1_multiblock_encrypt(
    void *ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param)
{
    return (int)tls1_multi_block_encrypt(ctx, param->out,
                                         param->inp, param->len,
                                         param->interleave / 4);
}
# endif /* OPENSSL_NO_MULTIBLOCK */
static const PROV_CIPHER_HW_AES_HMAC_SHA cipher_hw_aes_hmac_sha1 = {
    {
        aesni_cbc_hmac_sha1_init_key,
        aesni_cbc_hmac_sha1_cipher
    },
    aesni_cbc_hmac_sha1_set_mac_key,
    aesni_cbc_hmac_sha1_set_tls1_aad,
# if !defined(OPENSSL_NO_MULTIBLOCK)
    aesni_cbc_hmac_sha1_tls1_multiblock_max_bufsize,
    aesni_cbc_hmac_sha1_tls1_multiblock_aad,
    aesni_cbc_hmac_sha1_tls1_multiblock_encrypt
# endif
};

const PROV_CIPHER_HW_AES_HMAC_SHA *ossl_prov_cipher_hw_aes_cbc_hmac_sha1(void)
{
    return &cipher_hw_aes_hmac_sha1;
}

#endif /* !defined(AES_CBC_HMAC_SHA_CAPABLE) || !defined(AESNI_CAPABLE) */