X-Git-Url: https://git.openssl.org/?p=openssl.git;a=blobdiff_plain;f=engines%2Fe_afalg.c;h=99516cb1bb4a0dc5ed4d1f14143486e9592d5b5a;hp=dacbe358cb6a8628dd0ecd93f8912052835fd954;hb=HEAD;hpb=62cc845fc955c8d4de7b703f57bfd8e5854f00f4 diff --git a/engines/e_afalg.c b/engines/e_afalg.c index dacbe358cb..3ca5b0211e 100644 --- a/engines/e_afalg.c +++ b/engines/e_afalg.c @@ -1,5 +1,5 @@ /* - * Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy @@ -7,6 +7,9 @@ * https://www.openssl.org/source/license.html */ +/* We need to use some deprecated APIs */ +#define OPENSSL_SUPPRESS_DEPRECATED + /* Required for vmsplice */ #ifndef _GNU_SOURCE # define _GNU_SOURCE @@ -121,11 +124,56 @@ static ossl_inline int io_read(aio_context_t ctx, long n, struct iocb **iocb) return syscall(__NR_io_submit, ctx, n, iocb); } +/* A version of 'struct timespec' with 32-bit time_t and nanoseconds. */ +struct __timespec32 +{ + __kernel_long_t tv_sec; + __kernel_long_t tv_nsec; +}; + static ossl_inline int io_getevents(aio_context_t ctx, long min, long max, struct io_event *events, struct timespec *timeout) { - return syscall(__NR_io_getevents, ctx, min, max, events, timeout); +#if defined(__NR_io_pgetevents_time64) + /* Check if we are a 32-bit architecture with a 64-bit time_t */ + if (sizeof(*timeout) != sizeof(struct __timespec32)) { + int ret = syscall(__NR_io_pgetevents_time64, ctx, min, max, events, + timeout, NULL); + if (ret == 0 || errno != ENOSYS) + return ret; + } +#endif + +#if defined(__NR_io_getevents) + if (sizeof(*timeout) == sizeof(struct __timespec32)) + /* + * time_t matches our architecture length, we can just use + * __NR_io_getevents + */ + return syscall(__NR_io_getevents, ctx, min, max, events, timeout); + else { + /* + * We don't have __NR_io_pgetevents_time64, but we are using a + * 64-bit time_t on a 32-bit architecture. If we can fit the + * timeout value in a 32-bit time_t, then let's do that + * and then use the __NR_io_getevents syscall. + */ + if (timeout && timeout->tv_sec == (long)timeout->tv_sec) { + struct __timespec32 ts32; + + ts32.tv_sec = (__kernel_long_t) timeout->tv_sec; + ts32.tv_nsec = (__kernel_long_t) timeout->tv_nsec; + + return syscall(__NR_io_getevents, ctx, min, max, events, ts32); + } else { + return syscall(__NR_io_getevents, ctx, min, max, events, NULL); + } + } +#endif + + errno = ENOSYS; + return -1; } static void afalg_waitfd_cleanup(ASYNC_WAIT_CTX *ctx, const void *key, @@ -276,6 +324,15 @@ static int afalg_fin_cipher_aio(afalg_aio *aio, int sfd, unsigned char *buf, } if (eval > 0) { +#ifdef OSSL_SANITIZE_MEMORY + /* + * In a memory sanitiser build, the changes to memory made by the + * system call aren't reliably detected. By initialising the + * memory here, the sanitiser is told that they are okay. 
+ */ + memset(events, 0, sizeof(events)); +#endif + /* Get results of AIO read */ r = io_getevents(aio->aio_ctx, 1, MAX_INFLIGHTS, events, &timeout); @@ -298,6 +355,18 @@ static int afalg_fin_cipher_aio(afalg_aio *aio, int sfd, unsigned char *buf, } continue; } else { + char strbuf[32]; + /* + * sometimes __s64 is defined as long long int + * but on some archs ( like mips64 or powerpc64 ) it's just long int + * + * to be able to use BIO_snprintf() with %lld without warnings + * copy events[0].res to an long long int variable + * + * because long long int should always be at least 64 bit this should work + */ + long long int op_ret = events[0].res; + /* * Retries exceed for -EBUSY or unrecoverable error * condition for this instance of operation. @@ -305,6 +374,17 @@ static int afalg_fin_cipher_aio(afalg_aio *aio, int sfd, unsigned char *buf, ALG_WARN ("%s(%d): Crypto Operation failed with code %lld\n", __FILE__, __LINE__, events[0].res); + BIO_snprintf(strbuf, sizeof(strbuf), "%lld", op_ret); + switch (events[0].res) { + case -ENOMEM: + AFALGerr(0, AFALG_R_KERNEL_OP_FAILED); + ERR_add_error_data(3, "-ENOMEM ( code ", strbuf, " )"); + break; + default: + AFALGerr(0, AFALG_R_KERNEL_OP_FAILED); + ERR_add_error_data(2, "code ", strbuf); + break; + } return 0; } } @@ -457,7 +537,7 @@ static int afalg_start_cipher_sk(afalg_ctx *actx, const unsigned char *in, /* * vmsplice and splice are used to pin the user space input buffer for - * kernel space processing avoiding copys from user to kernel space + * kernel space processing avoiding copies from user to kernel space */ ret = vmsplice(actx->zc_pipe[1], &iov, 1, SPLICE_F_GIFT); if (ret < 0) { @@ -496,7 +576,7 @@ static int afalg_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc) { int ciphertype; - int ret; + int ret, len; afalg_ctx *actx; const char *ciphername; @@ -505,7 +585,7 @@ static int afalg_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, return 0; } - if (EVP_CIPHER_CTX_cipher(ctx) == NULL) { + if (EVP_CIPHER_CTX_get0_cipher(ctx) == NULL) { ALG_WARN("%s(%d): Cipher object NULL\n", __FILE__, __LINE__); return 0; } @@ -516,7 +596,7 @@ static int afalg_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, return 0; } - ciphertype = EVP_CIPHER_CTX_nid(ctx); + ciphertype = EVP_CIPHER_CTX_get_nid(ctx); switch (ciphertype) { case NID_aes_128_cbc: case NID_aes_192_cbc: @@ -529,9 +609,9 @@ static int afalg_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, return 0; } - if (ALG_AES_IV_LEN != EVP_CIPHER_CTX_iv_length(ctx)) { + if (ALG_AES_IV_LEN != EVP_CIPHER_CTX_get_iv_length(ctx)) { ALG_WARN("%s(%d): Unsupported IV length :%d\n", __FILE__, __LINE__, - EVP_CIPHER_CTX_iv_length(ctx)); + EVP_CIPHER_CTX_get_iv_length(ctx)); return 0; } @@ -540,8 +620,9 @@ static int afalg_cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, if (ret < 1) return 0; - - ret = afalg_set_key(actx, key, EVP_CIPHER_CTX_key_length(ctx)); + if ((len = EVP_CIPHER_CTX_get_key_length(ctx)) <= 0) + goto err; + ret = afalg_set_key(actx, key, len); if (ret < 1) goto err; @@ -587,14 +668,14 @@ static int afalg_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, * set iv now for decrypt operation as the input buffer can be * overwritten for inplace operation where in = out. 
*/ - if (EVP_CIPHER_CTX_encrypting(ctx) == 0) { + if (EVP_CIPHER_CTX_is_encrypting(ctx) == 0) { memcpy(nxtiv, in + (inl - ALG_AES_IV_LEN), ALG_AES_IV_LEN); } /* Send input data to kernel space */ ret = afalg_start_cipher_sk(actx, (unsigned char *)in, inl, EVP_CIPHER_CTX_iv(ctx), - EVP_CIPHER_CTX_encrypting(ctx)); + EVP_CIPHER_CTX_is_encrypting(ctx)); if (ret < 1) { return 0; } @@ -604,7 +685,7 @@ static int afalg_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, if (ret < 1) return 0; - if (EVP_CIPHER_CTX_encrypting(ctx)) { + if (EVP_CIPHER_CTX_is_encrypting(ctx)) { memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), out + (inl - ALG_AES_IV_LEN), ALG_AES_IV_LEN); } else { @@ -625,11 +706,8 @@ static int afalg_cipher_cleanup(EVP_CIPHER_CTX *ctx) } actx = (afalg_ctx *) EVP_CIPHER_CTX_get_cipher_data(ctx); - if (actx == NULL || actx->init_done != MAGIC_INIT_NUM) { - ALG_WARN("%s afalg ctx passed\n", - ctx == NULL ? "NULL" : "Uninitialised"); - return 0; - } + if (actx == NULL || actx->init_done != MAGIC_INIT_NUM) + return 1; close(actx->sfd); close(actx->bfd); @@ -662,6 +740,9 @@ static cbc_handles *get_cipher_handle(int nid) static const EVP_CIPHER *afalg_aes_cbc(int nid) { cbc_handles *cipher_handle = get_cipher_handle(nid); + + if (cipher_handle == NULL) + return NULL; if (cipher_handle->_hidden == NULL && ((cipher_handle->_hidden = EVP_CIPHER_meth_new(nid, @@ -729,7 +810,7 @@ static int bind_afalg(ENGINE *e) * now, as bind_aflag can only be called by one thread at a * time. */ - for(i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) { + for (i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) { if (afalg_aes_cbc(afalg_cipher_nids[i]) == NULL) { AFALGerr(AFALG_F_BIND_AFALG, AFALG_R_INIT_FAILED); return 0; @@ -753,8 +834,10 @@ static int bind_helper(ENGINE *e, const char *id) if (!afalg_chk_platform()) return 0; - if (!bind_afalg(e)) + if (!bind_afalg(e)) { + afalg_destroy(e); return 0; + } return 1; } @@ -829,9 +912,19 @@ void engine_load_afalg_int(void) toadd = engine_afalg(); if (toadd == NULL) return; + ERR_set_mark(); ENGINE_add(toadd); + /* + * If the "add" worked, it gets a structural reference. So either way, we + * release our just-created reference. + */ ENGINE_free(toadd); - ERR_clear_error(); + /* + * If the "add" didn't work, it was probably a conflict because it was + * already added (eg. someone calling ENGINE_load_blah then calling + * ENGINE_load_builtin_engines() perhaps). + */ + ERR_pop_to_mark(); } # endif @@ -848,7 +941,7 @@ static int afalg_finish(ENGINE *e) static int free_cbc(void) { short unsigned int i; - for(i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) { + for (i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) { EVP_CIPHER_meth_free(cbc_handle[i]._hidden); cbc_handle[i]._hidden = NULL; }
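
Note on the io_getevents() rework above: on a 32-bit architecture built with a 64-bit time_t, the wrapper first tries __NR_io_pgetevents_time64 and only falls back to the legacy __NR_io_getevents ABI when the timeout's seconds value survives narrowing to the kernel's 32-bit field. The sketch below isolates that narrowing test; it is illustrative only, and the names ts32_demo and timeout_fits_legacy_abi are invented for this example and do not appear in e_afalg.c.

/*
 * Illustrative only -- not part of the patch.  Mirrors the
 * "timeout->tv_sec == (long)timeout->tv_sec" idiom used above to decide
 * whether a 64-bit timeout can be handed to a syscall ABI whose timespec
 * carries a 32-bit tv_sec.  On an LP64 platform long is already 64 bits
 * wide, so the test never rejects a value there.
 */
#include <stdio.h>
#include <time.h>

struct ts32_demo {              /* stands in for struct __timespec32 */
    long tv_sec;
    long tv_nsec;
};

static int timeout_fits_legacy_abi(const struct timespec *in,
                                   struct ts32_demo *out)
{
    /* Reject a timeout whose seconds value would be truncated. */
    if (in == NULL || in->tv_sec != (long)in->tv_sec)
        return 0;
    out->tv_sec = (long)in->tv_sec;
    out->tv_nsec = (long)in->tv_nsec;
    return 1;
}

int main(void)
{
    struct timespec t = { .tv_sec = 30, .tv_nsec = 0 };
    struct ts32_demo ts32;

    printf("30s timeout usable via legacy ABI: %s\n",
           timeout_fits_legacy_abi(&t, &ts32) ? "yes" : "no");
    return 0;
}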
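
The engine_load_afalg_int() change above registers the engine and then discards only the error that a duplicate registration may raise, keeping anything recorded earlier on the error stack. A minimal sketch of that pattern follows, assuming a build that still exposes the (deprecated) ENGINE API; the helper name add_engine_quietly is invented for illustration.

/*
 * Illustrative only.  ERR_set_mark()/ERR_pop_to_mark() bracket the add so
 * that a failure caused by an already-registered id is silently dropped,
 * while errors recorded before the mark stay on the error stack.
 */
#define OPENSSL_SUPPRESS_DEPRECATED
#include <openssl/engine.h>
#include <openssl/err.h>

static void add_engine_quietly(ENGINE *e)
{
    ERR_set_mark();
    ENGINE_add(e);       /* takes its own structural reference on success */
    ENGINE_free(e);      /* release the caller's reference either way */
    ERR_pop_to_mark();
}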