/*
- * Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 2016-2021 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
+/* We need to use some deprecated APIs */
+#define OPENSSL_SUPPRESS_DEPRECATED
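+/*
+ * (Among others, EVP_CIPHER_CTX_iv() and EVP_CIPHER_CTX_iv_noconst(),
+ * still used below, are deprecated in OpenSSL 3.0.)
+ */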
+
/* Required for vmsplice */
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
return syscall(__NR_io_submit, ctx, n, iocb);
}
+/* A version of 'struct timespec' with 32-bit time_t and nanoseconds. */
+struct __timespec32
+{
+ __kernel_long_t tv_sec;
+ __kernel_long_t tv_nsec;
+};
+
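+/*
+ * Wrapper for the io_getevents() syscall family. On a 32-bit
+ * architecture built with a 64-bit time_t, the plain __NR_io_getevents
+ * syscall still expects the 32-bit timespec layout above, so prefer
+ * __NR_io_pgetevents_time64 where the kernel offers it and otherwise
+ * narrow the timeout to 32 bits.
+ */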
static ossl_inline int io_getevents(aio_context_t ctx, long min, long max,
struct io_event *events,
struct timespec *timeout)
{
- return syscall(__NR_io_getevents, ctx, min, max, events, timeout);
+#if defined(__NR_io_pgetevents_time64)
+ /* Check if we are a 32-bit architecture with a 64-bit time_t */
+ if (sizeof(*timeout) != sizeof(struct __timespec32)) {
+ int ret = syscall(__NR_io_pgetevents_time64, ctx, min, max, events,
+ timeout, NULL);
+ /* a non-negative return is the number of events: success */
+ if (ret >= 0 || errno != ENOSYS)
+ return ret;
+ }
+#endif
+
+#if defined(__NR_io_getevents)
+ if (sizeof(*timeout) == sizeof(struct __timespec32))
+ /*
+ * time_t matches our architecture length, we can just use
+ * __NR_io_getevents
+ */
+ return syscall(__NR_io_getevents, ctx, min, max, events, timeout);
+ else {
+ /*
+ * We don't have __NR_io_pgetevents_time64, but we are using a
+ * 64-bit time_t on a 32-bit architecture. If we can fit the
+ * timeout value in a 32-bit time_t, then let's do that
+ * and then use the __NR_io_getevents syscall.
+ */
+ if (timeout && timeout->tv_sec == (long)timeout->tv_sec) {
+ struct __timespec32 ts32;
+
+ ts32.tv_sec = (__kernel_long_t) timeout->tv_sec;
+ ts32.tv_nsec = (__kernel_long_t) timeout->tv_nsec;
+
+ return syscall(__NR_io_getevents, ctx, min, max, events, &ts32);
+ } else {
+ return syscall(__NR_io_getevents, ctx, min, max, events, NULL);
+ }
+ }
+#endif
+
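+ /* Neither syscall variant is available on this target */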
+ errno = ENOSYS;
+ return -1;
}
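+/*
+ * Illustrative call (mirroring the AIO completion path further down);
+ * the wrapper transparently picks the right syscall for the current
+ * time_t width:
+ *
+ *     struct io_event events[MAX_INFLIGHTS];
+ *     struct timespec timeout = { 0, 0 };
+ *     r = io_getevents(aio->aio_ctx, 1, MAX_INFLIGHTS, events, &timeout);
+ */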
static void afalg_waitfd_cleanup(ASYNC_WAIT_CTX *ctx, const void *key,
}
if (eval > 0) {
+#ifdef OSSL_SANITIZE_MEMORY
+ /*
+ * In a memory sanitiser build, the changes to memory made by the
+ * system call aren't reliably detected. By initialising the
+ * memory here, the sanitiser is told that they are okay.
+ */
+ memset(events, 0, sizeof(events));
+#endif
+
/* Get results of AIO read */
r = io_getevents(aio->aio_ctx, 1, MAX_INFLIGHTS,
events, &timeout);
}
continue;
} else {
+ char strbuf[32];
+
+ /*
+ * Sometimes __s64 is defined as 'long long int', but on some
+ * architectures (e.g. mips64 or powerpc64) it is plain 'long int'.
+ * To be able to use BIO_snprintf() with %lld without warnings, copy
+ * events[0].res into a 'long long int' variable; as 'long long int'
+ * is always at least 64 bits wide, the value fits.
+ */
+ long long int op_ret = events[0].res;
+
/*
* Retries exceeded for -EBUSY, or an unrecoverable error
* condition for this instance of the operation.
*/
ALG_WARN
("%s(%d): Crypto Operation failed with code %lld\n",
__FILE__, __LINE__, events[0].res);
+ BIO_snprintf(strbuf, sizeof(strbuf), "%lld", op_ret);
+ switch (events[0].res) {
+ case -ENOMEM:
+ AFALGerr(0, AFALG_R_KERNEL_OP_FAILED);
+ ERR_add_error_data(3, "-ENOMEM ( code ", strbuf, " )");
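+ /* queues e.g. "-ENOMEM ( code -12 )", as ENOMEM is 12 on Linux */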
+ break;
+ default:
+ AFALGerr(0, AFALG_R_KERNEL_OP_FAILED);
+ ERR_add_error_data(2, "code ", strbuf);
+ break;
+ }
return 0;
}
}
/*
* vmsplice and splice are used to pin the user space input buffer for
- * kernel space processing avoiding copys from user to kernel space
+ * kernel space processing avoiding copies from user to kernel space
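+ * (SPLICE_F_GIFT gifts the pages to the kernel: the caller must not
+ * modify them while the operation is in flight)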
*/
ret = vmsplice(actx->zc_pipe[1], &iov, 1, SPLICE_F_GIFT);
if (ret < 0) {
const unsigned char *iv, int enc)
{
int ciphertype;
- int ret;
+ int ret, len;
afalg_ctx *actx;
const char *ciphername;
return 0;
}
- if (EVP_CIPHER_CTX_cipher(ctx) == NULL) {
+ if (EVP_CIPHER_CTX_get0_cipher(ctx) == NULL) {
ALG_WARN("%s(%d): Cipher object NULL\n", __FILE__, __LINE__);
return 0;
}
return 0;
}
- ciphertype = EVP_CIPHER_CTX_nid(ctx);
+ ciphertype = EVP_CIPHER_CTX_get_nid(ctx);
switch (ciphertype) {
case NID_aes_128_cbc:
case NID_aes_192_cbc:
return 0;
}
- if (ALG_AES_IV_LEN != EVP_CIPHER_CTX_iv_length(ctx)) {
+ if (ALG_AES_IV_LEN != EVP_CIPHER_CTX_get_iv_length(ctx)) {
ALG_WARN("%s(%d): Unsupported IV length :%d\n", __FILE__, __LINE__,
- EVP_CIPHER_CTX_iv_length(ctx));
+ EVP_CIPHER_CTX_get_iv_length(ctx));
return 0;
}
if (ret < 1)
return 0;
-
- ret = afalg_set_key(actx, key, EVP_CIPHER_CTX_key_length(ctx));
+ if ((len = EVP_CIPHER_CTX_get_key_length(ctx)) <= 0)
+ goto err;
+ ret = afalg_set_key(actx, key, len);
if (ret < 1)
goto err;
* set iv now for decrypt operation as the input buffer can be
* overwritten for inplace operation where in = out.
*/
- if (EVP_CIPHER_CTX_encrypting(ctx) == 0) {
+ if (EVP_CIPHER_CTX_is_encrypting(ctx) == 0) {
memcpy(nxtiv, in + (inl - ALG_AES_IV_LEN), ALG_AES_IV_LEN);
}
/* Send input data to kernel space */
ret = afalg_start_cipher_sk(actx, (unsigned char *)in, inl,
EVP_CIPHER_CTX_iv(ctx),
- EVP_CIPHER_CTX_encrypting(ctx));
+ EVP_CIPHER_CTX_is_encrypting(ctx));
if (ret < 1) {
return 0;
}
if (ret < 1)
return 0;
- if (EVP_CIPHER_CTX_encrypting(ctx)) {
+ if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), out + (inl - ALG_AES_IV_LEN),
ALG_AES_IV_LEN);
} else {
}
actx = (afalg_ctx *) EVP_CIPHER_CTX_get_cipher_data(ctx);
- if (actx == NULL || actx->init_done != MAGIC_INIT_NUM) {
- ALG_WARN("%s afalg ctx passed\n",
- ctx == NULL ? "NULL" : "Uninitialised");
- return 0;
- }
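+ /*
+ * Not an error: cleanup may run on a ctx that was never fully
+ * initialised, in which case there is nothing to release.
+ */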
+ if (actx == NULL || actx->init_done != MAGIC_INIT_NUM)
+ return 1;
close(actx->sfd);
close(actx->bfd);
static const EVP_CIPHER *afalg_aes_cbc(int nid)
{
cbc_handles *cipher_handle = get_cipher_handle(nid);
+
+ if (cipher_handle == NULL)
+ return NULL;
if (cipher_handle->_hidden == NULL
&& ((cipher_handle->_hidden =
EVP_CIPHER_meth_new(nid,
* now, as bind_aflag can only be called by one thread at a
* time.
*/
- for(i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) {
+ for (i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) {
if (afalg_aes_cbc(afalg_cipher_nids[i]) == NULL) {
AFALGerr(AFALG_F_BIND_AFALG, AFALG_R_INIT_FAILED);
return 0;
if (!afalg_chk_platform())
return 0;
- if (!bind_afalg(e))
+ if (!bind_afalg(e)) {
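+ /* release anything bind_afalg() managed to allocate before failing */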
+ afalg_destroy(e);
return 0;
+ }
return 1;
}
toadd = engine_afalg();
if (toadd == NULL)
return;
+ ERR_set_mark();
ENGINE_add(toadd);
+ /*
+ * If the "add" worked, it gets a structural reference. So either way, we
+ * release our just-created reference.
+ */
ENGINE_free(toadd);
- ERR_clear_error();
+ /*
+ * If the "add" didn't work, it was probably a conflict because it was
+ * already added (e.g. someone calling ENGINE_load_blah then calling
+ * ENGINE_load_builtin_engines() perhaps).
+ */
+ ERR_pop_to_mark();
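+ /*
+ * (Unlike ERR_clear_error(), which this replaces, the mark/pop pair
+ * discards only the errors raised since ERR_set_mark(), leaving any
+ * earlier errors intact.)
+ */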
}
# endif
static int free_cbc(void)
{
short unsigned int i;
- for(i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) {
+ for (i = 0; i < OSSL_NELEM(afalg_cipher_nids); i++) {
EVP_CIPHER_meth_free(cbc_handle[i]._hidden);
cbc_handle[i]._hidden = NULL;
}