2 * Copyright 2018-2022 The OpenSSL Project Authors. All Rights Reserved.
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
10 #include <openssl/evp.h>
11 #include <openssl/core_names.h>
12 #include <openssl/rand.h>
13 #include "../../ssl_local.h"
14 #include "../record_local.h"
15 #include "recmethod_local.h"
16 #include "internal/ktls.h"
18 static struct record_functions_st ossl_ktls_funcs;
20 #if defined(__FreeBSD__)
21 # include "crypto/cryptodev.h"
24 * Check if a given cipher is supported by the KTLS interface.
25 * The kernel might still fail the setsockopt() if no suitable
26 * provider is found, but this checks if the socket option
27 * supports the cipher suite used at all.
/*
 * FreeBSD variant: decide whether the negotiated cipher/digest pair can be
 * expressed through the FreeBSD KTLS socket option at all.
 * NOTE(review): the source here is elided (the embedded original line
 * numbers jump), so code is left byte-identical and only comments added.
 */
29 static int ktls_int_check_supported_cipher(OSSL_RECORD_LAYER *rl,
/* Behaviour depends on the negotiated protocol version */
34 switch (rl->version) {
38 #ifdef OPENSSL_KTLS_TLS13
/* AEAD-only versions: accept the GCM suites (and ChaCha20 if built in) */
46 if (EVP_CIPHER_is_a(c, "AES-128-GCM")
47 || EVP_CIPHER_is_a(c, "AES-256-GCM")
48 # ifdef OPENSSL_KTLS_CHACHA20_POLY1305
49 || EVP_CIPHER_is_a(c, "CHACHA20-POLY1305")
/* CBC path: only AES-CBC is offloadable ... */
54 if (!EVP_CIPHER_is_a(c, "AES-128-CBC")
55 && !EVP_CIPHER_is_a(c, "AES-256-CBC"))
/* ... and only with one of these HMAC digests */
64 if (EVP_MD_is_a(md, "SHA1")
65 || EVP_MD_is_a(md, "SHA2-256")
66 || EVP_MD_is_a(md, "SHA2-384"))
72 /* Function to configure kernel TLS structure */
/*
 * Function to configure kernel TLS structure (FreeBSD variant).
 *
 * Translates the negotiated cipher/digest and the supplied key material
 * (iv/key/mac_key) into *crypto_info for the kernel TLS setsockopt().
 * The success/failure returns are elided from this view; presumably 1/0 —
 * TODO confirm against the full file.
 *
 * Review fixes applied:
 *  - `md` is now const-qualified, matching the Linux definition of the same
 *    function later in this file (which takes `const EVP_MD *md`).
 *  - Removed a stray '{' after the SHA2-256 test that unbalanced the
 *    if/else-if chain: the assignment is the sole statement of that branch,
 *    exactly like its SHA1 and SHA2-384 siblings.
 */
74 int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c,
75 const EVP_MD *md, void *rl_sequence,
76 ktls_crypto_info_t *crypto_info, int is_tx,
77 unsigned char *iv, size_t ivlen,
78 unsigned char *key, size_t keylen,
79 unsigned char *mac_key, size_t mac_secret_size)
/* Start from a zeroed structure so unset fields are well defined */
81 memset(crypto_info, 0, sizeof(*crypto_info));
82 if (EVP_CIPHER_is_a(c, "AES-128-GCM")
83 || EVP_CIPHER_is_a(c, "AES-256-GCM")) {
84 crypto_info->cipher_algorithm = CRYPTO_AES_NIST_GCM_16;
85 crypto_info->iv_len = ivlen;
87 # ifdef OPENSSL_KTLS_CHACHA20_POLY1305
88 if (EVP_CIPHER_is_a(c, "CHACHA20-POLY1305")) {
89 crypto_info->cipher_algorithm = CRYPTO_CHACHA20_POLY1305;
90 crypto_info->iv_len = ivlen;
93 if (EVP_CIPHER_is_a(c, "AES-128-CBC") || EVP_CIPHER_is_a(c, "AES-256-CBC")) {
/* CBC suites additionally need the HMAC auth algorithm and MAC key */
96 if (EVP_MD_is_a(md, "SHA1"))
97 crypto_info->auth_algorithm = CRYPTO_SHA1_HMAC;
98 else if (EVP_MD_is_a(md, "SHA2-256"))
99 crypto_info->auth_algorithm = CRYPTO_SHA2_256_HMAC;
100 else if (EVP_MD_is_a(md, "SHA2-384"))
101 crypto_info->auth_algorithm = CRYPTO_SHA2_384_HMAC;
104 crypto_info->cipher_algorithm = CRYPTO_AES_CBC;
105 crypto_info->iv_len = ivlen;
106 crypto_info->auth_key = mac_key;
107 crypto_info->auth_key_len = mac_secret_size;
/* Common fields for every cipher choice */
111 crypto_info->cipher_key = key;
112 crypto_info->cipher_key_len = keylen;
113 crypto_info->iv = iv;
114 crypto_info->tls_vmajor = (version >> 8) & 0x000000ff;
115 crypto_info->tls_vminor = (version & 0x000000ff);
116 # ifdef TCP_RXTLS_ENABLE
/* RX offload needs the current record sequence number as well */
117 memcpy(crypto_info->rec_seq, rl_sequence, sizeof(crypto_info->rec_seq));
125 #endif /* __FreeBSD__ */
127 #if defined(OPENSSL_SYS_LINUX)
128 /* Function to check supported ciphers in Linux */
129 static int ktls_int_check_supported_cipher(OSSL_RECORD_LAYER *rl,
/* Behaviour depends on the negotiated protocol version */
134 switch (rl->version) {
136 #ifdef OPENSSL_KTLS_TLS13
145 * Check that cipher is AES_GCM_128, AES_GCM_256, AES_CCM_128
146 * or Chacha20-Poly1305
148 # ifdef OPENSSL_KTLS_AES_CCM_128
/* CCM is only usable with the full-length TLS tag */
149 if (EVP_CIPHER_is_a(c, "AES-128-CCM")) {
150 if (taglen != EVP_CCM_TLS_TAG_LEN)
151 # ifdef OPENSSL_KTLS_AES_GCM_128
157 || EVP_CIPHER_is_a(c, "AES-128-GCM")
159 # ifdef OPENSSL_KTLS_AES_GCM_256
160 || EVP_CIPHER_is_a(c, "AES-256-GCM")
162 # ifdef OPENSSL_KTLS_CHACHA20_POLY1305
/*
 * NOTE(review): mixed-case name differs from the FreeBSD block above;
 * EVP_CIPHER_is_a name matching is case-insensitive, so this is benign,
 * but normalising the spelling would aid grepping.
 */
163 || EVP_CIPHER_is_a(c, "ChaCha20-Poly1305")
171 /* Function to configure kernel TLS structure */
/*
 * Linux variant: translate the negotiated cipher and key material into the
 * kernel's tls_crypto_info structures (gcm128/gcm256/ccm128/chacha20poly1305).
 * NOTE(review): this view of the source is elided (embedded original line
 * numbers jump), so code is left byte-identical and only comments added.
 */
173 int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c,
174 const EVP_MD *md, void *rl_sequence,
175 ktls_crypto_info_t *crypto_info, int is_tx,
176 unsigned char *iv, size_t ivlen,
177 unsigned char *key, size_t keylen,
178 unsigned char *mac_key, size_t mac_secret_size)
180 unsigned char geniv[EVP_GCM_TLS_EXPLICIT_IV_LEN];
/* eiv points at the explicit part of the IV once it is known */
181 unsigned char *eiv = NULL;
183 # ifdef OPENSSL_NO_KTLS_RX
/*
 * GCM/CCM share fixed/explicit IV layout; the ossl_assert()s below pin
 * that assumption at runtime in debug builds.
 */
188 if (EVP_CIPHER_get_mode(c) == EVP_CIPH_GCM_MODE
189 || EVP_CIPHER_get_mode(c) == EVP_CIPH_CCM_MODE) {
190 if (!ossl_assert(EVP_GCM_TLS_FIXED_IV_LEN == EVP_CCM_TLS_FIXED_IV_LEN)
191 || !ossl_assert(EVP_GCM_TLS_EXPLICIT_IV_LEN
192 == EVP_CCM_TLS_EXPLICIT_IV_LEN))
/* TLS 1.2: only the fixed IV is supplied; generate the explicit part */
194 if (version == TLS1_2_VERSION) {
195 if (!ossl_assert(ivlen == EVP_GCM_TLS_FIXED_IV_LEN))
198 if (RAND_bytes_ex(libctx, geniv,
199 EVP_GCM_TLS_EXPLICIT_IV_LEN, 0) <= 0)
202 memset(geniv, 0, EVP_GCM_TLS_EXPLICIT_IV_LEN);
/* Otherwise the caller supplies fixed + explicit IV concatenated */
206 if (!ossl_assert(ivlen == EVP_GCM_TLS_FIXED_IV_LEN
207 + EVP_GCM_TLS_EXPLICIT_IV_LEN))
209 eiv = iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE;
213 memset(crypto_info, 0, sizeof(*crypto_info));
/* Fill the per-cipher kernel structure */
214 switch (EVP_CIPHER_get_nid(c)) {
215 # ifdef OPENSSL_KTLS_AES_GCM_128
216 case NID_aes_128_gcm:
217 if (!ossl_assert(TLS_CIPHER_AES_GCM_128_SALT_SIZE
218 == EVP_GCM_TLS_FIXED_IV_LEN)
219 || !ossl_assert(TLS_CIPHER_AES_GCM_128_IV_SIZE
220 == EVP_GCM_TLS_EXPLICIT_IV_LEN))
222 crypto_info->gcm128.info.cipher_type = TLS_CIPHER_AES_GCM_128;
223 crypto_info->gcm128.info.version = version;
224 crypto_info->tls_crypto_info_len = sizeof(crypto_info->gcm128);
/* iv <- explicit part, salt <- fixed part */
225 memcpy(crypto_info->gcm128.iv, eiv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
226 memcpy(crypto_info->gcm128.salt, iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
227 memcpy(crypto_info->gcm128.key, key, keylen);
228 memcpy(crypto_info->gcm128.rec_seq, rl_sequence,
229 TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
232 # ifdef OPENSSL_KTLS_AES_GCM_256
233 case NID_aes_256_gcm:
234 if (!ossl_assert(TLS_CIPHER_AES_GCM_256_SALT_SIZE
235 == EVP_GCM_TLS_FIXED_IV_LEN)
236 || !ossl_assert(TLS_CIPHER_AES_GCM_256_IV_SIZE
237 == EVP_GCM_TLS_EXPLICIT_IV_LEN))
239 crypto_info->gcm256.info.cipher_type = TLS_CIPHER_AES_GCM_256;
240 crypto_info->gcm256.info.version = version;
241 crypto_info->tls_crypto_info_len = sizeof(crypto_info->gcm256);
242 memcpy(crypto_info->gcm256.iv, eiv, TLS_CIPHER_AES_GCM_256_IV_SIZE);
243 memcpy(crypto_info->gcm256.salt, iv, TLS_CIPHER_AES_GCM_256_SALT_SIZE);
244 memcpy(crypto_info->gcm256.key, key, keylen);
245 memcpy(crypto_info->gcm256.rec_seq, rl_sequence,
246 TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
250 # ifdef OPENSSL_KTLS_AES_CCM_128
251 case NID_aes_128_ccm:
252 if (!ossl_assert(TLS_CIPHER_AES_CCM_128_SALT_SIZE
253 == EVP_CCM_TLS_FIXED_IV_LEN)
254 || !ossl_assert(TLS_CIPHER_AES_CCM_128_IV_SIZE
255 == EVP_CCM_TLS_EXPLICIT_IV_LEN))
257 crypto_info->ccm128.info.cipher_type = TLS_CIPHER_AES_CCM_128;
258 crypto_info->ccm128.info.version = version;
259 crypto_info->tls_crypto_info_len = sizeof(crypto_info->ccm128);
260 memcpy(crypto_info->ccm128.iv, eiv, TLS_CIPHER_AES_CCM_128_IV_SIZE);
261 memcpy(crypto_info->ccm128.salt, iv, TLS_CIPHER_AES_CCM_128_SALT_SIZE);
262 memcpy(crypto_info->ccm128.key, key, keylen);
263 memcpy(crypto_info->ccm128.rec_seq, rl_sequence,
264 TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
267 # ifdef OPENSSL_KTLS_CHACHA20_POLY1305
/* ChaCha20-Poly1305 uses a single full-length IV, no salt split */
268 case NID_chacha20_poly1305:
269 if (!ossl_assert(ivlen == TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE))
271 crypto_info->chacha20poly1305.info.cipher_type
272 = TLS_CIPHER_CHACHA20_POLY1305;
273 crypto_info->chacha20poly1305.info.version = version;
274 crypto_info->tls_crypto_info_len = sizeof(crypto_info->chacha20poly1305);
275 memcpy(crypto_info->chacha20poly1305.iv, iv, ivlen);
276 memcpy(crypto_info->chacha20poly1305.key, key, keylen);
277 memcpy(crypto_info->chacha20poly1305.rec_seq, rl_sequence,
278 TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
287 #endif /* OPENSSL_SYS_LINUX */
/*
 * Probe and enable KTLS on the record layer's BIO.  Every precondition
 * failure returns OSSL_RECORD_RETURN_NON_FATAL_ERR so the caller can fall
 * back to a software record layer instead of aborting the connection.
 */
289 static int ktls_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
290 unsigned char *key, size_t keylen,
291 unsigned char *iv, size_t ivlen,
292 unsigned char *mackey, size_t mackeylen,
293 const EVP_CIPHER *ciph,
299 ktls_crypto_info_t crypto_info;
302 * Check if we are suitable for KTLS. If not suitable we return
303 * OSSL_RECORD_RETURN_NON_FATAL_ERR so that other record layers can be tried
308 return OSSL_RECORD_RETURN_NON_FATAL_ERR;
310 /* ktls supports only the maximum fragment size */
311 if (rl->max_frag_len != SSL3_RT_MAX_PLAIN_LENGTH)
312 return OSSL_RECORD_RETURN_NON_FATAL_ERR;
314 /* check that cipher is supported */
315 if (!ktls_int_check_supported_cipher(rl, ciph, md, taglen))
316 return OSSL_RECORD_RETURN_NON_FATAL_ERR;
318 /* All future data will get encrypted by ktls. Flush the BIO or skip ktls */
319 if (rl->direction == OSSL_RECORD_DIRECTION_WRITE) {
320 if (BIO_flush(rl->bio) <= 0)
321 return OSSL_RECORD_RETURN_NON_FATAL_ERR;
323 /* KTLS does not support record padding */
324 if (rl->padding != NULL || rl->block_padding > 0)
325 return OSSL_RECORD_RETURN_NON_FATAL_ERR;
/* Build the kernel crypto descriptor from the negotiated parameters */
328 if (!ktls_configure_crypto(rl->libctx, rl->version, ciph, md, rl->sequence,
330 rl->direction == OSSL_RECORD_DIRECTION_WRITE,
331 iv, ivlen, key, keylen, mackey, mackeylen))
332 return OSSL_RECORD_RETURN_NON_FATAL_ERR;
/* Hand the descriptor to the kernel via the BIO */
334 if (!BIO_set_ktls(rl->bio, &crypto_info, rl->direction))
335 return OSSL_RECORD_RETURN_NON_FATAL_ERR;
337 if (rl->direction == OSSL_RECORD_DIRECTION_WRITE &&
338 (rl->options & SSL_OP_ENABLE_KTLS_TX_ZEROCOPY_SENDFILE) != 0)
339 /* Ignore errors. The application opts in to using the zerocopy
340 * optimization. If the running kernel doesn't support it, just
341 * continue without the optimization.
343 BIO_set_ktls_tx_zerocopy_sendfile(rl->bio);
345 return OSSL_RECORD_RETURN_SUCCESS;
/*
 * Read wrapper: delegate to tls_default_read_n() and map kernel-reported
 * read failures onto the appropriate TLS alerts via RLAYERfatal().
 * NOTE(review): the switch scaffolding (presumably on errno) is elided from
 * this view — confirm against the full file.
 */
348 static int ktls_read_n(OSSL_RECORD_LAYER *rl, size_t n, size_t max, int extend,
349 int clearold, size_t *readbytes)
353 ret = tls_default_read_n(rl, n, max, extend, clearold, readbytes);
/* Only hard failures (worse than retry) need alert mapping */
355 if (ret < OSSL_RECORD_RETURN_RETRY) {
358 RLAYERfatal(rl, SSL_AD_BAD_RECORD_MAC,
359 SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC);
362 RLAYERfatal(rl, SSL_AD_RECORD_OVERFLOW,
363 SSL_R_PACKET_LENGTH_TOO_LONG);
366 RLAYERfatal(rl, SSL_AD_PROTOCOL_VERSION,
367 SSL_R_WRONG_VERSION_NUMBER);
/*
 * Cipher hook for the dispatch table.  The kernel performs the actual
 * crypto, so this is expected to be a no-op; body elided from this view.
 */
377 static int ktls_cipher(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *inrecs,
378 size_t n_recs, int sending, SSL_MAC_BUF *mac,
/*
 * Kernel-decrypted records must carry the TLS 1.2 record version on the
 * wire; anything else is a decode error.
 */
384 static int ktls_validate_record_header(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *rec)
386 if (rec->rec_version != TLS1_2_VERSION) {
387 RLAYERfatal(rl, SSL_AD_DECODE_ERROR, SSL_R_WRONG_VERSION_NUMBER);
/*
 * Post-processing hook: TLS 1.3 records still need the common inner
 * content-type handling; other versions' handling is elided from this view.
 */
394 static int ktls_post_process_record(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *rec)
396 if (rl->version == TLS1_3_VERSION)
397 return tls13_common_post_process_record(rl, rec);
/*
 * Constructor for the KTLS record layer: reuse the generic TLS record-layer
 * initialisation, then install the KTLS function table and push the crypto
 * state into the kernel.  On crypto-state failure the partially built
 * layer is freed before returning.
 */
403 ktls_new_record_layer(OSSL_LIB_CTX *libctx, const char *propq, int vers,
404 int role, int direction, int level, uint16_t epoch,
405 unsigned char *key, size_t keylen, unsigned char *iv,
406 size_t ivlen, unsigned char *mackey, size_t mackeylen,
407 const EVP_CIPHER *ciph, size_t taglen,
409 const EVP_MD *md, COMP_METHOD *comp, BIO *prev,
410 BIO *transport, BIO *next, BIO_ADDR *local, BIO_ADDR *peer,
411 const OSSL_PARAM *settings, const OSSL_PARAM *options,
412 const OSSL_DISPATCH *fns, void *cbarg,
413 OSSL_RECORD_LAYER **retrl)
417 ret = tls_int_new_record_layer(libctx, propq, vers, role, direction, level,
418 key, keylen, iv, ivlen, mackey, mackeylen,
419 ciph, taglen, mactype, md, comp, prev,
420 transport, next, local, peer, settings,
421 options, fns, cbarg, retrl)
423 if (ret != OSSL_RECORD_RETURN_SUCCESS)
/* Swap in the KTLS-specific vtable before configuring crypto */
426 (*retrl)->funcs = &ossl_ktls_funcs;
428 ret = (*retrl)->funcs->set_crypto_state(*retrl, level, key, keylen, iv,
429 ivlen, mackey, mackeylen, ciph,
430 taglen, mactype, md, comp);
432 if (ret != OSSL_RECORD_RETURN_SUCCESS) {
433 OPENSSL_free(*retrl);
437 * With KTLS we always try and read as much as possible and fill the
440 (*retrl)->read_ahead = 1;
/* KTLS writes straight from the application buffer, so no write buffers
 * are allocated; only a single template is supported. */
445 static int ktls_allocate_write_buffers(OSSL_RECORD_LAYER *rl,
446 OSSL_RECORD_TEMPLATE *templates,
447 size_t numtempl, size_t *prefix)
449 if (!ossl_assert(numtempl == 1))
453 * We just use the end application buffer in the case of KTLS, so nothing
454 * to do. We pretend we set up one buffer.
/*
 * Point the (single) write buffer directly at the caller's data rather
 * than staging it through a WPACKET.
 */
461 static int ktls_initialise_write_packets(OSSL_RECORD_LAYER *rl,
462 OSSL_RECORD_TEMPLATE *templates,
464 OSSL_RECORD_TEMPLATE *prefixtempl,
472 * We just use the application buffer directly and don't use any WPACKET
476 wb->type = templates[0].type;
479 * ktls doesn't modify the buffer, but to avoid a warning we need
480 * to discard the const qualifier.
481 * This doesn't leak memory because the buffers have never been allocated
484 TLS_BUFFER_set_buf(wb, (unsigned char *)templates[0].buf);
485 TLS_BUFFER_set_offset(wb, 0);
/* Mark as app-owned so the buffer is never freed by the record layer */
486 TLS_BUFFER_set_app_buffer(wb, 1);
491 static int ktls_prepare_record_header(OSSL_RECORD_LAYER *rl,
493 OSSL_RECORD_TEMPLATE *templ,
494 unsigned int rectype,
495 unsigned char **recdata)
497 /* The kernel writes the record header, so nothing to do */
/* No-op hook: encryption happens in the kernel, not in this layer. */
503 static int ktls_prepare_for_encryption(OSSL_RECORD_LAYER *rl,
506 TLS_RL_RECORD *thiswr)
508 /* No encryption, so nothing to do */
/* No-op hook: post-encryption bookkeeping is handled by the kernel. */
512 static int ktls_post_encryption_processing(OSSL_RECORD_LAYER *rl,
514 OSSL_RECORD_TEMPLATE *templ,
516 TLS_RL_RECORD *thiswr)
518 /* The kernel does anything that is needed, so nothing to do here */
/*
 * Before sending a non-application record, flush pending data and tell the
 * KTLS BIO the control-message type so the kernel does not coalesce it with
 * application data.
 */
522 static int ktls_prepare_write_bio(OSSL_RECORD_LAYER *rl, int type)
525 * To prevent coalescing of control and data messages,
526 * such as in buffer_write, we flush the BIO
528 if (type != SSL3_RT_APPLICATION_DATA) {
529 int ret, i = BIO_flush(rl->bio);
/* Distinguish retryable flush failures from fatal ones */
532 if (BIO_should_retry(rl->bio))
533 ret = OSSL_RECORD_RETURN_RETRY;
535 ret = OSSL_RECORD_RETURN_FATAL;
538 BIO_set_ktls_ctrl_msg(rl->bio, type);
541 return OSSL_RECORD_RETURN_SUCCESS;
/* Only the read side needs buffers; writes use the app buffer directly. */
544 static int ktls_alloc_buffers(OSSL_RECORD_LAYER *rl)
546 /* We use the application buffer directly for writing */
547 if (rl->direction == OSSL_RECORD_DIRECTION_WRITE)
550 return tls_alloc_buffers(rl);
/* Mirror of ktls_alloc_buffers(): nothing to free on the write side. */
553 static int ktls_free_buffers(OSSL_RECORD_LAYER *rl)
555 /* We use the application buffer directly for writing */
556 if (rl->direction == OSSL_RECORD_DIRECTION_WRITE)
559 return tls_free_buffers(rl);
/*
 * KTLS function table: KTLS-specific hooks plus shared tls_* defaults.
 * NOTE(review): some slots are elided from this view (original line numbers
 * jump); order must match struct record_functions_st in recmethod_local.h.
 */
562 static struct record_functions_st ossl_ktls_funcs = {
563 ktls_set_crypto_state,
566 tls_default_set_protocol_version,
568 tls_get_more_records,
569 ktls_validate_record_header,
570 ktls_post_process_record,
571 tls_get_max_records_default,
572 tls_write_records_default,
573 ktls_allocate_write_buffers,
574 ktls_initialise_write_packets,
576 ktls_prepare_record_header,
578 ktls_prepare_for_encryption,
579 ktls_post_encryption_processing,
580 ktls_prepare_write_bio
583 const OSSL_RECORD_METHOD ossl_ktls_record_method = {
584 ktls_new_record_layer,
586 tls_unprocessed_read_pending,
587 tls_processed_read_pending,
588 tls_app_data_pending,
591 tls_retry_write_records,
596 tls_set_protocol_version,
597 tls_set_plain_alerts,
598 tls_set_first_handshake,
599 tls_set_max_pipelines,
604 tls_set_max_frag_len,
606 tls_increment_sequence_ctr,