/*
 * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include <assert.h>
#include <string.h>

#include "internal/quic_record_tx.h"
#include "internal/qlog_event_helpers.h"
#include "internal/bio_addr.h"
#include "internal/common.h"
#include "quic_record_shared.h"
#include "internal/list.h"
#include "../ssl_local.h"
21 * Encrypted packets awaiting transmission are kept in TX Entries (TXEs), which
22 * are queued in linked lists just like TXEs.
24 typedef struct txe_st TXE;
27 OSSL_LIST_MEMBER(txe, TXE);
28 size_t data_len, alloc_len;
31 * Destination and local addresses, as applicable. Both of these are only
32 * used if the family is not AF_UNSPEC.
37 * alloc_len allocated bytes (of which data_len bytes are valid) follow this
42 DEFINE_LIST_OF(txe, TXE);
43 typedef OSSL_LIST(txe) TXE_LIST;
45 static ossl_inline unsigned char *txe_data(const TXE *e)
47 return (unsigned char *)(e + 1);
58 /* Per encryption-level state. */
59 OSSL_QRL_ENC_LEVEL_SET el_set;
64 /* QLOG instance if in use, or NULL. */
67 /* TX maximum datagram payload length. */
71 * List of TXEs which are not currently in use. These are moved to the
72 * pending list (possibly via tx_cons first) as they are filled.
77 * List of TXEs which are filled with completed datagrams ready to be
81 size_t pending_count; /* items in list */
82 size_t pending_bytes; /* sum(txe->data_len) in pending */
85 * TXE which is under construction for coalescing purposes, if any.
86 * This TXE is neither on the free nor pending list. Once the datagram
87 * is completed, it is moved to the pending list.
90 size_t cons_count; /* num packets */
93 * Number of packets transmitted in this key epoch. Used to enforce AEAD
94 * confidentiality limit.
96 uint64_t epoch_pkt_count;
98 ossl_mutate_packet_cb mutatecb;
99 ossl_finish_mutate_cb finishmutatecb;
102 /* Message callback related arguments */
103 ossl_msg_cb msg_callback;
104 void *msg_callback_arg;
105 SSL *msg_callback_ssl;
108 /* Instantiates a new QTX. */
109 OSSL_QTX *ossl_qtx_new(const OSSL_QTX_ARGS *args)
113 if (args->mdpl < QUIC_MIN_INITIAL_DGRAM_LEN)
116 qtx = OPENSSL_zalloc(sizeof(OSSL_QTX));
120 qtx->libctx = args->libctx;
121 qtx->propq = args->propq;
122 qtx->bio = args->bio;
123 qtx->mdpl = args->mdpl;
124 qtx->qlog = args->qlog;
128 static void qtx_cleanup_txl(TXE_LIST *l)
132 for (e = ossl_list_txe_head(l); e != NULL; e = enext) {
133 enext = ossl_list_txe_next(e);
139 void ossl_qtx_free(OSSL_QTX *qtx)
146 /* Free TXE queue data. */
147 qtx_cleanup_txl(&qtx->pending);
148 qtx_cleanup_txl(&qtx->free);
149 OPENSSL_free(qtx->cons);
151 /* Drop keying material and crypto resources. */
152 for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
153 ossl_qrl_enc_level_set_discard(&qtx->el_set, i);
158 /* Set mutator callbacks for test framework support */
159 void ossl_qtx_set_mutator(OSSL_QTX *qtx, ossl_mutate_packet_cb mutatecb,
160 ossl_finish_mutate_cb finishmutatecb, void *mutatearg)
162 qtx->mutatecb = mutatecb;
163 qtx->finishmutatecb = finishmutatecb;
164 qtx->mutatearg = mutatearg;
167 int ossl_qtx_provide_secret(OSSL_QTX *qtx,
171 const unsigned char *secret,
174 if (enc_level >= QUIC_ENC_LEVEL_NUM)
177 return ossl_qrl_enc_level_set_provide_secret(&qtx->el_set,
189 int ossl_qtx_discard_enc_level(OSSL_QTX *qtx, uint32_t enc_level)
191 if (enc_level >= QUIC_ENC_LEVEL_NUM)
194 ossl_qrl_enc_level_set_discard(&qtx->el_set, enc_level);
198 int ossl_qtx_is_enc_level_provisioned(OSSL_QTX *qtx, uint32_t enc_level)
200 return ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1) != NULL;
203 /* Allocate a new TXE. */
204 static TXE *qtx_alloc_txe(size_t alloc_len)
208 if (alloc_len >= SIZE_MAX - sizeof(TXE))
211 txe = OPENSSL_malloc(sizeof(TXE) + alloc_len);
215 ossl_list_txe_init_elem(txe);
216 txe->alloc_len = alloc_len;
222 * Ensures there is at least one TXE in the free list, allocating a new entry
223 * if necessary. The returned TXE is in the free list; it is not popped.
225 * alloc_len is a hint which may be used to determine the TXE size if allocation
226 * is necessary. Returns NULL on allocation failure.
228 static TXE *qtx_ensure_free_txe(OSSL_QTX *qtx, size_t alloc_len)
232 txe = ossl_list_txe_head(&qtx->free);
236 txe = qtx_alloc_txe(alloc_len);
240 ossl_list_txe_insert_tail(&qtx->free, txe);
245 * Resize the data buffer attached to an TXE to be n bytes in size. The address
246 * of the TXE might change; the new address is returned, or NULL on failure, in
247 * which case the original TXE remains valid.
249 static TXE *qtx_resize_txe(OSSL_QTX *qtx, TXE_LIST *txl, TXE *txe, size_t n)
253 /* Should never happen. */
257 if (n >= SIZE_MAX - sizeof(TXE))
260 /* Remove the item from the list to avoid accessing freed memory */
261 p = ossl_list_txe_prev(txe);
262 ossl_list_txe_remove(txl, txe);
265 * NOTE: We do not clear old memory, although it does contain decrypted
268 txe2 = OPENSSL_realloc(txe, sizeof(TXE) + n);
269 if (txe2 == NULL || txe == txe2) {
271 ossl_list_txe_insert_head(txl, txe);
273 ossl_list_txe_insert_after(txl, p, txe);
278 ossl_list_txe_insert_head(txl, txe2);
280 ossl_list_txe_insert_after(txl, p, txe2);
282 if (qtx->cons == txe)
290 * Ensure the data buffer attached to an TXE is at least n bytes in size.
291 * Returns NULL on failure.
293 static TXE *qtx_reserve_txe(OSSL_QTX *qtx, TXE_LIST *txl,
296 if (txe->alloc_len >= n)
299 return qtx_resize_txe(qtx, txl, txe, n);
302 /* Move a TXE from pending to free. */
303 static void qtx_pending_to_free(OSSL_QTX *qtx)
305 TXE *txe = ossl_list_txe_head(&qtx->pending);
308 ossl_list_txe_remove(&qtx->pending, txe);
309 --qtx->pending_count;
310 qtx->pending_bytes -= txe->data_len;
311 ossl_list_txe_insert_tail(&qtx->free, txe);
314 /* Add a TXE not currently in any list to the pending list. */
315 static void qtx_add_to_pending(OSSL_QTX *qtx, TXE *txe)
317 ossl_list_txe_insert_tail(&qtx->pending, txe);
318 ++qtx->pending_count;
319 qtx->pending_bytes += txe->data_len;
323 const OSSL_QTX_IOVEC *iovec;
324 size_t num_iovec, idx, byte_off, bytes_remaining;
327 static size_t iovec_total_bytes(const OSSL_QTX_IOVEC *iovec,
332 for (i = 0; i < num_iovec; ++i)
333 l += iovec[i].buf_len;
338 static void iovec_cur_init(struct iovec_cur *cur,
339 const OSSL_QTX_IOVEC *iovec,
343 cur->num_iovec = num_iovec;
346 cur->bytes_remaining = iovec_total_bytes(iovec, num_iovec);
350 * Get an extent of bytes from the iovec cursor. *buf is set to point to the
351 * buffer and the number of bytes in length of the buffer is returned. This
352 * value may be less than the max_buf_len argument. If no more data is
353 * available, returns 0.
355 static size_t iovec_cur_get_buffer(struct iovec_cur *cur,
356 const unsigned char **buf,
361 if (max_buf_len == 0) {
367 if (cur->idx >= cur->num_iovec)
370 l = cur->iovec[cur->idx].buf_len - cur->byte_off;
375 *buf = cur->iovec[cur->idx].buf + cur->byte_off;
377 cur->bytes_remaining -= l;
382 * Zero-length iovec entry or we already consumed all of it, try the
390 /* Determines the size of the AEAD output given the input size. */
391 int ossl_qtx_calculate_ciphertext_payload_len(OSSL_QTX *qtx, uint32_t enc_level,
392 size_t plaintext_len,
393 size_t *ciphertext_len)
395 OSSL_QRL_ENC_LEVEL *el
396 = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
405 * We currently only support ciphers with a 1:1 mapping between plaintext
406 * and ciphertext size, save for authentication tag.
408 tag_len = ossl_qrl_get_suite_cipher_tag_len(el->suite_id);
410 *ciphertext_len = plaintext_len + tag_len;
414 /* Determines the size of the AEAD input given the output size. */
415 int ossl_qtx_calculate_plaintext_payload_len(OSSL_QTX *qtx, uint32_t enc_level,
416 size_t ciphertext_len,
417 size_t *plaintext_len)
419 OSSL_QRL_ENC_LEVEL *el
420 = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
428 tag_len = ossl_qrl_get_suite_cipher_tag_len(el->suite_id);
430 if (ciphertext_len <= tag_len) {
435 *plaintext_len = ciphertext_len - tag_len;
439 /* Any other error (including packet being too big for MDPL). */
440 #define QTX_FAIL_GENERIC (-1)
443 * Returned where there is insufficient room in the datagram to write the
446 #define QTX_FAIL_INSUFFICIENT_LEN (-2)
448 static int qtx_write_hdr(OSSL_QTX *qtx, const QUIC_PKT_HDR *hdr, TXE *txe,
449 QUIC_PKT_HDR_PTRS *ptrs)
453 unsigned char *data = txe_data(txe) + txe->data_len;
455 if (!WPACKET_init_static_len(&wpkt, data, txe->alloc_len - txe->data_len, 0))
458 if (!ossl_quic_wire_encode_pkt_hdr(&wpkt, hdr->dst_conn_id.id_len,
460 || !WPACKET_get_total_written(&wpkt, &l)) {
461 WPACKET_finish(&wpkt);
464 WPACKET_finish(&wpkt);
466 if (qtx->msg_callback != NULL)
467 qtx->msg_callback(1, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, data, l,
468 qtx->msg_callback_ssl, qtx->msg_callback_arg);
475 static int qtx_encrypt_into_txe(OSSL_QTX *qtx, struct iovec_cur *cur, TXE *txe,
476 uint32_t enc_level, QUIC_PN pn,
477 const unsigned char *hdr, size_t hdr_len,
478 QUIC_PKT_HDR_PTRS *ptrs)
480 int l = 0, l2 = 0, nonce_len;
481 OSSL_QRL_ENC_LEVEL *el
482 = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
483 unsigned char nonce[EVP_MAX_IV_LENGTH];
485 EVP_CIPHER_CTX *cctx = NULL;
487 /* We should not have been called if we do not have key material. */
488 if (!ossl_assert(el != NULL)) {
489 ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
494 * Have we already encrypted the maximum number of packets using the current
497 if (el->op_count >= ossl_qrl_get_suite_max_pkt(el->suite_id)) {
498 ERR_raise(ERR_LIB_SSL, SSL_R_MAXIMUM_ENCRYPTED_PKTS_REACHED);
503 * TX key update is simpler than for RX; once we initiate a key update, we
504 * never need the old keys, as we never deliberately send a packet with old
505 * keys. Thus the EL always uses keyslot 0 for the TX side.
508 if (!ossl_assert(cctx != NULL)) {
509 ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
513 /* Construct nonce (nonce=IV ^ PN). */
514 nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
515 if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN))) {
516 ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
520 memcpy(nonce, el->iv[0], (size_t)nonce_len);
521 for (i = 0; i < sizeof(QUIC_PN); ++i)
522 nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
524 /* type and key will already have been setup; feed the IV. */
525 if (EVP_CipherInit_ex(cctx, NULL, NULL, NULL, nonce, /*enc=*/1) != 1) {
526 ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
531 if (EVP_CipherUpdate(cctx, NULL, &l, hdr, hdr_len) != 1) {
532 ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
536 /* Encrypt plaintext directly into TXE. */
538 const unsigned char *src;
541 src_len = iovec_cur_get_buffer(cur, &src, SIZE_MAX);
545 if (EVP_CipherUpdate(cctx, txe_data(txe) + txe->data_len,
546 &l, src, src_len) != 1) {
547 ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
551 #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
552 /* Ignore what we just encrypted and overwrite it with the plaintext */
553 memcpy(txe_data(txe) + txe->data_len, src, l);
556 assert(l > 0 && src_len == (size_t)l);
557 txe->data_len += src_len;
560 /* Finalise and get tag. */
561 if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
562 ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
566 if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_GET_TAG,
567 el->tag_len, txe_data(txe) + txe->data_len) != 1) {
568 ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
572 txe->data_len += el->tag_len;
574 /* Apply header protection. */
575 if (!ossl_quic_hdr_protector_encrypt(&el->hpr, ptrs))
583 * Append a packet to the TXE buffer, serializing and encrypting it in the
586 static int qtx_write(OSSL_QTX *qtx, const OSSL_QTX_PKT *pkt, TXE *txe,
589 int ret, needs_encrypt;
590 size_t hdr_len, pred_hdr_len, payload_len, pkt_len, space_left;
591 size_t min_len, orig_data_len;
592 struct iovec_cur cur;
593 QUIC_PKT_HDR_PTRS ptrs;
594 unsigned char *hdr_start;
595 OSSL_QRL_ENC_LEVEL *el = NULL;
597 const OSSL_QTX_IOVEC *iovec;
601 * Determine if the packet needs encryption and the minimum conceivable
602 * serialization length.
604 if (!ossl_quic_pkt_type_is_encrypted(pkt->hdr->type)) {
606 min_len = QUIC_MIN_VALID_PKT_LEN;
609 min_len = QUIC_MIN_VALID_PKT_LEN_CRYPTO;
610 el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
611 if (!ossl_assert(el != NULL)) /* should already have been checked */
615 orig_data_len = txe->data_len;
616 space_left = txe->alloc_len - txe->data_len;
617 if (space_left < min_len) {
618 /* Not even a possibility of it fitting. */
619 ret = QTX_FAIL_INSUFFICIENT_LEN;
623 /* Set some fields in the header we are responsible for. */
624 if (pkt->hdr->type == QUIC_PKT_TYPE_1RTT)
625 pkt->hdr->key_phase = (unsigned char)(el->key_epoch & 1);
627 /* If we are running tests then mutate_packet may be non NULL */
628 if (qtx->mutatecb != NULL) {
629 if (!qtx->mutatecb(pkt->hdr, pkt->iovec, pkt->num_iovec, &hdr,
630 &iovec, &num_iovec, qtx->mutatearg)) {
631 ret = QTX_FAIL_GENERIC;
637 num_iovec = pkt->num_iovec;
640 /* Walk the iovecs to determine actual input payload length. */
641 iovec_cur_init(&cur, iovec, num_iovec);
643 if (cur.bytes_remaining == 0) {
644 /* No zero-length payloads allowed. */
645 ret = QTX_FAIL_GENERIC;
649 /* Determine encrypted payload length. */
651 ossl_qtx_calculate_ciphertext_payload_len(qtx, enc_level,
655 payload_len = cur.bytes_remaining;
657 /* Determine header length. */
659 hdr->len = payload_len;
660 pred_hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(hdr->dst_conn_id.id_len,
662 if (pred_hdr_len == 0) {
663 ret = QTX_FAIL_GENERIC;
667 /* We now definitively know our packet length. */
668 pkt_len = pred_hdr_len + payload_len;
670 if (pkt_len > space_left) {
671 ret = QTX_FAIL_INSUFFICIENT_LEN;
675 if (ossl_quic_pkt_type_has_pn(hdr->type)) {
676 if (!ossl_quic_wire_encode_pkt_hdr_pn(pkt->pn,
679 ret = QTX_FAIL_GENERIC;
684 /* Append the header to the TXE. */
685 hdr_start = txe_data(txe) + txe->data_len;
686 if (!qtx_write_hdr(qtx, hdr, txe, &ptrs)) {
687 ret = QTX_FAIL_GENERIC;
691 hdr_len = (txe_data(txe) + txe->data_len) - hdr_start;
692 assert(hdr_len == pred_hdr_len);
694 if (!needs_encrypt) {
695 /* Just copy the payload across. */
696 const unsigned char *src;
700 /* Buffer length has already been checked above. */
701 src_len = iovec_cur_get_buffer(&cur, &src, SIZE_MAX);
705 memcpy(txe_data(txe) + txe->data_len, src, src_len);
706 txe->data_len += src_len;
709 /* Encrypt into TXE. */
710 if (!qtx_encrypt_into_txe(qtx, &cur, txe, enc_level, pkt->pn,
711 hdr_start, hdr_len, &ptrs)) {
712 ret = QTX_FAIL_GENERIC;
716 assert(txe->data_len - orig_data_len == pkt_len);
719 if (qtx->finishmutatecb != NULL)
720 qtx->finishmutatecb(qtx->mutatearg);
725 * Restore original length so we don't leave a half-written packet in the
728 txe->data_len = orig_data_len;
729 if (qtx->finishmutatecb != NULL)
730 qtx->finishmutatecb(qtx->mutatearg);
734 static TXE *qtx_ensure_cons(OSSL_QTX *qtx)
736 TXE *txe = qtx->cons;
741 txe = qtx_ensure_free_txe(qtx, qtx->mdpl);
745 ossl_list_txe_remove(&qtx->free, txe);
752 static int addr_eq(const BIO_ADDR *a, const BIO_ADDR *b)
754 return ((a == NULL || BIO_ADDR_family(a) == AF_UNSPEC)
755 && (b == NULL || BIO_ADDR_family(b) == AF_UNSPEC))
756 || (a != NULL && b != NULL && memcmp(a, b, sizeof(*a)) == 0);
759 int ossl_qtx_write_pkt(OSSL_QTX *qtx, const OSSL_QTX_PKT *pkt)
762 int coalescing = (pkt->flags & OSSL_QTX_PKT_FLAG_COALESCE) != 0;
767 /* Must have EL configured, must have header. */
768 if (pkt->hdr == NULL)
771 enc_level = ossl_quic_pkt_type_to_enc_level(pkt->hdr->type);
773 /* Some packet types must be in a packet all by themselves. */
774 if (!ossl_quic_pkt_type_can_share_dgram(pkt->hdr->type))
775 ossl_qtx_finish_dgram(qtx);
776 else if (enc_level >= QUIC_ENC_LEVEL_NUM
777 || ossl_qrl_enc_level_set_have_el(&qtx->el_set, enc_level) != 1) {
778 /* All other packet types are encrypted. */
782 was_coalescing = (qtx->cons != NULL && qtx->cons->data_len > 0);
784 if (!addr_eq(&qtx->cons->peer, pkt->peer)
785 || !addr_eq(&qtx->cons->local, pkt->local)) {
786 /* Must stop coalescing if addresses have changed */
787 ossl_qtx_finish_dgram(qtx);
793 * Start a new coalescing session or continue using the existing one and
794 * serialize/encrypt the packet. We always encrypt packets as soon as
795 * our caller gives them to us, which relieves the caller of any need to
796 * keep the plaintext around.
798 txe = qtx_ensure_cons(qtx);
800 return 0; /* allocation failure */
803 * Ensure TXE has at least MDPL bytes allocated. This should only be
804 * possible if the MDPL has increased.
806 if (!qtx_reserve_txe(qtx, NULL, txe, qtx->mdpl))
809 if (!was_coalescing) {
810 /* Set addresses in TXE. */
811 if (pkt->peer != NULL)
812 txe->peer = *pkt->peer;
814 BIO_ADDR_clear(&txe->peer);
816 if (pkt->local != NULL)
817 txe->local = *pkt->local;
819 BIO_ADDR_clear(&txe->local);
822 ret = qtx_write(qtx, pkt, txe, enc_level);
825 } else if (ret == QTX_FAIL_INSUFFICIENT_LEN) {
826 if (was_coalescing) {
828 * We failed due to insufficient length, so end the current
829 * datagram and try again.
831 ossl_qtx_finish_dgram(qtx);
835 * We failed due to insufficient length, but we were not
836 * coalescing/started with an empty datagram, so any future
837 * attempt to write this packet must also fail.
842 return 0; /* other error */
849 * Some packet types cannot have another packet come after them.
851 if (ossl_quic_pkt_type_must_be_last(pkt->hdr->type))
855 ossl_qtx_finish_dgram(qtx);
861 * Finish any incomplete datagrams for transmission which were flagged for
862 * coalescing. If there is no current coalescing datagram, this is a no-op.
864 void ossl_qtx_finish_dgram(OSSL_QTX *qtx)
866 TXE *txe = qtx->cons;
871 if (txe->data_len == 0)
873 * If we did not put anything in the datagram, just move it back to the
876 ossl_list_txe_insert_tail(&qtx->free, txe);
878 qtx_add_to_pending(qtx, txe);
884 static void txe_to_msg(TXE *txe, BIO_MSG *msg)
886 msg->data = txe_data(txe);
887 msg->data_len = txe->data_len;
890 = BIO_ADDR_family(&txe->peer) != AF_UNSPEC ? &txe->peer : NULL;
892 = BIO_ADDR_family(&txe->local) != AF_UNSPEC ? &txe->local : NULL;
895 #define MAX_MSGS_PER_SEND 32
897 int ossl_qtx_flush_net(OSSL_QTX *qtx)
899 BIO_MSG msg[MAX_MSGS_PER_SEND];
900 size_t wr, i, total_written = 0;
904 if (ossl_list_txe_head(&qtx->pending) == NULL)
905 return QTX_FLUSH_NET_RES_OK; /* Nothing to send. */
907 if (qtx->bio == NULL)
908 return QTX_FLUSH_NET_RES_PERMANENT_FAIL;
911 for (txe = ossl_list_txe_head(&qtx->pending), i = 0;
912 txe != NULL && i < OSSL_NELEM(msg);
913 txe = ossl_list_txe_next(txe), ++i)
914 txe_to_msg(txe, &msg[i]);
917 /* Nothing to send. */
921 res = BIO_sendmmsg(qtx->bio, msg, sizeof(BIO_MSG), i, 0, &wr);
922 if (res && wr == 0) {
924 * Treat 0 messages sent as a transient error and just stop for now.
926 ERR_clear_last_mark();
930 * We did not get anything, so further calls will probably not
933 if (BIO_err_is_non_fatal(ERR_peek_last_error())) {
934 /* Transient error, just stop for now, clearing the error. */
938 /* Non-transient error, fail and do not clear the error. */
939 ERR_clear_last_mark();
940 return QTX_FLUSH_NET_RES_PERMANENT_FAIL;
944 ERR_clear_last_mark();
947 * Remove everything which was successfully sent from the pending queue.
949 for (i = 0; i < wr; ++i) {
950 if (qtx->msg_callback != NULL)
951 qtx->msg_callback(1, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM,
952 msg[i].data, msg[i].data_len,
953 qtx->msg_callback_ssl,
954 qtx->msg_callback_arg);
955 qtx_pending_to_free(qtx);
961 return total_written > 0
962 ? QTX_FLUSH_NET_RES_OK
963 : QTX_FLUSH_NET_RES_TRANSIENT_FAIL;
966 int ossl_qtx_pop_net(OSSL_QTX *qtx, BIO_MSG *msg)
968 TXE *txe = ossl_list_txe_head(&qtx->pending);
973 txe_to_msg(txe, msg);
974 qtx_pending_to_free(qtx);
978 void ossl_qtx_set_bio(OSSL_QTX *qtx, BIO *bio)
983 int ossl_qtx_set_mdpl(OSSL_QTX *qtx, size_t mdpl)
985 if (mdpl < QUIC_MIN_INITIAL_DGRAM_LEN)
992 size_t ossl_qtx_get_mdpl(OSSL_QTX *qtx)
997 size_t ossl_qtx_get_queue_len_datagrams(OSSL_QTX *qtx)
999 return qtx->pending_count;
1002 size_t ossl_qtx_get_queue_len_bytes(OSSL_QTX *qtx)
1004 return qtx->pending_bytes;
1007 size_t ossl_qtx_get_cur_dgram_len_bytes(OSSL_QTX *qtx)
1009 return qtx->cons != NULL ? qtx->cons->data_len : 0;
1012 size_t ossl_qtx_get_unflushed_pkt_count(OSSL_QTX *qtx)
1014 return qtx->cons_count;
1017 int ossl_qtx_trigger_key_update(OSSL_QTX *qtx)
1019 return ossl_qrl_enc_level_set_key_update(&qtx->el_set,
1020 QUIC_ENC_LEVEL_1RTT);
1023 uint64_t ossl_qtx_get_cur_epoch_pkt_count(OSSL_QTX *qtx, uint32_t enc_level)
1025 OSSL_QRL_ENC_LEVEL *el;
1027 el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
1031 return el->op_count;
1034 uint64_t ossl_qtx_get_max_epoch_pkt_count(OSSL_QTX *qtx, uint32_t enc_level)
1036 OSSL_QRL_ENC_LEVEL *el;
1038 el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
1042 return ossl_qrl_get_suite_max_pkt(el->suite_id);
1045 void ossl_qtx_set_msg_callback(OSSL_QTX *qtx, ossl_msg_cb msg_callback,
1046 SSL *msg_callback_ssl)
1048 qtx->msg_callback = msg_callback;
1049 qtx->msg_callback_ssl = msg_callback_ssl;
1052 void ossl_qtx_set_msg_callback_arg(OSSL_QTX *qtx, void *msg_callback_arg)
1054 qtx->msg_callback_arg = msg_callback_arg;
1057 uint64_t ossl_qtx_get_key_epoch(OSSL_QTX *qtx)
1059 OSSL_QRL_ENC_LEVEL *el;
1061 el = ossl_qrl_enc_level_set_get(&qtx->el_set, QUIC_ENC_LEVEL_1RTT, 1);
1065 return el->key_epoch;