/*
- * Copyright 2017 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
*
- * Licensed under the OpenSSL license (the "License"). You may not use
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
# define CHECK_BSD_STYLE_MACROS
#endif
+/*
+ * ONE global file descriptor for all sessions. This makes copying digest
+ * session state possible (see digest_copy()) and avoids re-opening
+ * /dev/crypto for every new session.
+ */
+static int cfd;
+
/******************************************************************************
*
* Ciphers
*****/
struct cipher_ctx {
- int cfd;
struct session_op sess;
-
- /* to pass from init to do_cipher */
- const unsigned char *iv;
int op; /* COP_ENCRYPT or COP_DECRYPT */
+ unsigned long mode; /* EVP_CIPH_*_MODE */
+
+ /* to handle ctr mode being a stream cipher */
+ unsigned char partial[EVP_MAX_BLOCK_LENGTH];
+ unsigned int blocksize, num;
};
static const struct cipher_data_st {
{ NID_aes_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
{ NID_aes_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
#ifndef OPENSSL_NO_RC4
- { NID_rc4, 1, 16, 0, CRYPTO_ARC4 },
+ { NID_rc4, 1, 16, 0, EVP_CIPH_STREAM_CIPHER, CRYPTO_ARC4 },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_CTR)
{ NID_aes_128_ctr, 16, 128 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
{ NID_aes_256_xts, 16, 256 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
#endif
#if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_ECB)
- { NID_aes_128_ecb, 16, 128 / 8, 16, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
- { NID_aes_192_ecb, 16, 192 / 8, 16, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
- { NID_aes_256_ecb, 16, 256 / 8, 16, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
+ { NID_aes_128_ecb, 16, 128 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
+ { NID_aes_192_ecb, 16, 192 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
+ { NID_aes_256_ecb, 16, 256 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
#endif
#if 0 /* Not yet supported */
{ NID_aes_128_gcm, 16, 128 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
const struct cipher_data_st *cipher_d =
get_cipher_data(EVP_CIPHER_CTX_nid(ctx));
- if ((cipher_ctx->cfd = open("/dev/crypto", O_RDWR, 0)) < 0) {
- SYSerr(SYS_F_OPEN, errno);
- return 0;
- }
-
memset(&cipher_ctx->sess, 0, sizeof(cipher_ctx->sess));
cipher_ctx->sess.cipher = cipher_d->devcryptoid;
cipher_ctx->sess.keylen = cipher_d->keylen;
cipher_ctx->sess.key = (void *)key;
cipher_ctx->op = enc ? COP_ENCRYPT : COP_DECRYPT;
- if (ioctl(cipher_ctx->cfd, CIOCGSESSION, &cipher_ctx->sess) < 0) {
+ cipher_ctx->mode = cipher_d->flags & EVP_CIPH_MODE;
+ cipher_ctx->blocksize = cipher_d->blocksize;
+ if (ioctl(cfd, CIOCGSESSION, &cipher_ctx->sess) < 0) {
SYSerr(SYS_F_IOCTL, errno);
- close(cipher_ctx->cfd);
return 0;
}
struct cipher_ctx *cipher_ctx =
(struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
struct crypt_op cryp;
+ unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
#if !defined(COP_FLAG_WRITE_IV)
unsigned char saved_iv[EVP_MAX_IV_LENGTH];
+ const unsigned char *ivptr;
+ size_t nblocks, ivlen;
#endif
memset(&cryp, 0, sizeof(cryp));
cryp.len = inl;
cryp.src = (void *)in;
cryp.dst = (void *)out;
- cryp.iv = (void *)EVP_CIPHER_CTX_iv_noconst(ctx);
+ cryp.iv = (void *)iv;
cryp.op = cipher_ctx->op;
#if !defined(COP_FLAG_WRITE_IV)
cryp.flags = 0;
- if (EVP_CIPHER_CTX_iv_length(ctx) > 0) {
- assert(inl >= EVP_CIPHER_CTX_iv_length(ctx));
- if (!EVP_CIPHER_CTX_encrypting(ctx)) {
- unsigned char *ivptr = in + inl - EVP_CIPHER_CTX_iv_length(ctx);
-
- memcpy(saved_iv, ivptr, EVP_CIPHER_CTX_iv_length(ctx));
+ ivlen = EVP_CIPHER_CTX_iv_length(ctx);
+ if (ivlen > 0)
+ switch (cipher_ctx->mode) {
+ case EVP_CIPH_CBC_MODE:
+ assert(inl >= ivlen);
+ if (!EVP_CIPHER_CTX_encrypting(ctx)) {
+ ivptr = in + inl - ivlen;
+ memcpy(saved_iv, ivptr, ivlen);
+ }
+ break;
+
+ case EVP_CIPH_CTR_MODE:
+ break;
+
+ default: /* should not happen */
+ return 0;
}
- }
#else
cryp.flags = COP_FLAG_WRITE_IV;
#endif
- if (ioctl(cipher_ctx->cfd, CIOCCRYPT, &cryp) < 0) {
+ if (ioctl(cfd, CIOCCRYPT, &cryp) < 0) {
SYSerr(SYS_F_IOCTL, errno);
return 0;
}
#if !defined(COP_FLAG_WRITE_IV)
- if (EVP_CIPHER_CTX_iv_length(ctx) > 0) {
- unsigned char *ivptr = saved_iv;
+ if (ivlen > 0)
+ switch (cipher_ctx->mode) {
+ case EVP_CIPH_CBC_MODE:
+ assert(inl >= ivlen);
+ if (EVP_CIPHER_CTX_encrypting(ctx))
+ ivptr = out + inl - ivlen;
+ else
+ ivptr = saved_iv;
+
+ memcpy(iv, ivptr, ivlen);
+ break;
+
+ case EVP_CIPH_CTR_MODE:
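+            /*
+             * The kernel consumed one counter block per block of input just
+             * processed; keep the copy of the counter in the EVP IV in sync
+             * by adding the block count to it, treating the IV as a
+             * big-endian integer and propagating the carry.
+             */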
+ nblocks = (inl + cipher_ctx->blocksize - 1)
+ / cipher_ctx->blocksize;
+ do {
+ ivlen--;
+ nblocks += iv[ivlen];
+ iv[ivlen] = (uint8_t) nblocks;
+ nblocks >>= 8;
+ } while (ivlen);
+ break;
+
+ default: /* should not happen */
+ return 0;
+ }
+#endif
+
+ return 1;
+}
+
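+/*
+ * ctr_do_cipher() presents CTR mode to EVP as a stream cipher: whole blocks
+ * go straight to cipher_do_cipher(), and any leftover bytes are handled by
+ * encrypting a zero block to obtain one block of keystream. That keystream
+ * is cached in cipher_ctx->partial and consumed byte by byte across calls,
+ * with cipher_ctx->num tracking how much of it has been used.
+ */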
+static int ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t inl)
+{
+ struct cipher_ctx *cipher_ctx =
+ (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
+ size_t nblocks, len;
- assert(inl >= EVP_CIPHER_CTX_iv_length(ctx));
- if (!EVP_CIPHER_CTX_encrypting(ctx))
- ivptr = out + inl - EVP_CIPHER_CTX_iv_length(ctx);
+ /* initial partial block */
+ while (cipher_ctx->num && inl) {
+ (*out++) = *(in++) ^ cipher_ctx->partial[cipher_ctx->num];
+ --inl;
+ cipher_ctx->num = (cipher_ctx->num + 1) % cipher_ctx->blocksize;
+ }
- memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), ivptr,
- EVP_CIPHER_CTX_iv_length(ctx));
+    /* full blocks; cipher_do_cipher() also advances the IV counter */
+    if (inl >= (unsigned int) cipher_ctx->blocksize) {
+        nblocks = inl / cipher_ctx->blocksize;
+ len = nblocks * cipher_ctx->blocksize;
+ if (cipher_do_cipher(ctx, out, in, len) < 1)
+ return 0;
+ inl -= len;
+ out += len;
+ in += len;
+ }
+
+ /* final partial block */
+ if (inl) {
+ memset(cipher_ctx->partial, 0, cipher_ctx->blocksize);
+ if (cipher_do_cipher(ctx, cipher_ctx->partial, cipher_ctx->partial,
+ cipher_ctx->blocksize) < 1)
+ return 0;
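+        /*
+         * partial[] now holds one block of keystream; num records how many
+         * bytes of it have been consumed, so a later call can use the
+         * remainder before requesting more from the kernel.
+         */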
+ while (inl--) {
+ out[cipher_ctx->num] = in[cipher_ctx->num]
+ ^ cipher_ctx->partial[cipher_ctx->num];
+ cipher_ctx->num++;
+ }
}
-#endif
return 1;
}
+static int cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int p1, void *p2)
+{
+ EVP_CIPHER_CTX *to_ctx = (EVP_CIPHER_CTX *)p2;
+ struct cipher_ctx *cipher_ctx;
+
+ if (type == EVP_CTRL_COPY) {
+ /* when copying the context, a new session needs to be initialized */
+ cipher_ctx = (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
+ return (cipher_ctx == NULL)
+ || cipher_init(to_ctx, cipher_ctx->sess.key, EVP_CIPHER_CTX_iv(ctx),
+ (cipher_ctx->op == COP_ENCRYPT));
+ }
+
+ return -1;
+}
+
static int cipher_cleanup(EVP_CIPHER_CTX *ctx)
{
struct cipher_ctx *cipher_ctx =
(struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
- if (ioctl(cipher_ctx->cfd, CIOCFSESSION, &cipher_ctx->sess) < 0) {
+ if (ioctl(cfd, CIOCFSESSION, &cipher_ctx->sess.ses) < 0) {
SYSerr(SYS_F_IOCTL, errno);
return 0;
}
- if (close(cipher_ctx->cfd) < 0) {
- SYSerr(SYS_F_CLOSE, errno);
- return 0;
- }
return 1;
}
static int known_cipher_nids_amount = -1; /* -1 indicates not yet initialised */
static EVP_CIPHER *known_cipher_methods[OSSL_NELEM(cipher_data)] = { NULL, };
-static void prepare_cipher_methods()
+static void prepare_cipher_methods(void)
{
size_t i;
struct session_op sess;
- int cfd;
-
- if ((cfd = open("/dev/crypto", O_RDWR, 0)) < 0)
- return;
+ unsigned long cipher_mode;
memset(&sess, 0, sizeof(sess));
sess.key = (void *)"01234567890123456789012345678901234567890123456789";
sess.cipher = cipher_data[i].devcryptoid;
sess.keylen = cipher_data[i].keylen;
if (ioctl(cfd, CIOCGSESSION, &sess) < 0
- || ioctl(cfd, CIOCFSESSION, &sess) < 0)
+ || ioctl(cfd, CIOCFSESSION, &sess.ses) < 0)
continue;
+ cipher_mode = cipher_data[i].flags & EVP_CIPH_MODE;
+
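+        /*
+         * CTR mode is registered with a block size of 1 so that EVP treats
+         * it as a stream cipher and passes ctr_do_cipher() arbitrary
+         * lengths of data instead of buffering to block boundaries.
+         */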
if ((known_cipher_methods[i] =
EVP_CIPHER_meth_new(cipher_data[i].nid,
- cipher_data[i].blocksize,
+ cipher_mode == EVP_CIPH_CTR_MODE ? 1 :
+ cipher_data[i].blocksize,
cipher_data[i].keylen)) == NULL
|| !EVP_CIPHER_meth_set_iv_length(known_cipher_methods[i],
cipher_data[i].ivlen)
|| !EVP_CIPHER_meth_set_flags(known_cipher_methods[i],
cipher_data[i].flags
+ | EVP_CIPH_CUSTOM_COPY
| EVP_CIPH_FLAG_DEFAULT_ASN1)
|| !EVP_CIPHER_meth_set_init(known_cipher_methods[i], cipher_init)
|| !EVP_CIPHER_meth_set_do_cipher(known_cipher_methods[i],
+ cipher_mode == EVP_CIPH_CTR_MODE ?
+ ctr_do_cipher :
cipher_do_cipher)
+ || !EVP_CIPHER_meth_set_ctrl(known_cipher_methods[i], cipher_ctrl)
|| !EVP_CIPHER_meth_set_cleanup(known_cipher_methods[i],
cipher_cleanup)
|| !EVP_CIPHER_meth_set_impl_ctx_size(known_cipher_methods[i],
cipher_data[i].nid;
}
}
-
- close(cfd);
}
static const EVP_CIPHER *get_cipher_method(int nid)
known_cipher_methods[i] = NULL;
}
-static void destroy_all_cipher_methods()
+static void destroy_all_cipher_methods(void)
{
size_t i;
/*
* We only support digests if the cryptodev implementation supports multiple
- * data updates. Otherwise, we would be forced to maintain a cache, which is
- * perilous if there's a lot of data coming in (if someone wants to checksum
- * an OpenSSL tarball, for example).
+ * data updates and session copying. Otherwise, we would be forced to maintain
+ * a cache, which is perilous if there's a lot of data coming in (if someone
+ * wants to checksum an OpenSSL tarball, for example).
*/
-#if defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
+#if defined(CIOCCPHASH) && defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
+#define IMPLEMENT_DIGEST
/******************************************************************************
*
*****/
struct digest_ctx {
- int cfd;
struct session_op sess;
- int init;
+ /* This signals that the init function was called, not that it succeeded. */
+ int init_called;
};
static const struct digest_data_st {
const struct digest_data_st *digest_d =
get_digest_data(EVP_MD_CTX_type(ctx));
- if (digest_ctx->init == 0
- && (digest_ctx->cfd = open("/dev/crypto", O_RDWR, 0)) < 0) {
- SYSerr(SYS_F_OPEN, errno);
- return 0;
- }
-
- digest_ctx->init = 1;
+ digest_ctx->init_called = 1;
memset(&digest_ctx->sess, 0, sizeof(digest_ctx->sess));
digest_ctx->sess.mac = digest_d->devcryptoid;
- if (ioctl(digest_ctx->cfd, CIOCGSESSION, &digest_ctx->sess) < 0) {
+ if (ioctl(cfd, CIOCGSESSION, &digest_ctx->sess) < 0) {
SYSerr(SYS_F_IOCTL, errno);
- close(digest_ctx->cfd);
return 0;
}
cryp.dst = NULL;
cryp.mac = res;
cryp.flags = flags;
- return ioctl(ctx->cfd, CIOCCRYPT, &cryp);
+ return ioctl(cfd, CIOCCRYPT, &cryp);
}
static int digest_update(EVP_MD_CTX *ctx, const void *data, size_t count)
if (count == 0)
return 1;
+ if (digest_ctx == NULL)
+ return 0;
+
if (digest_op(digest_ctx, data, count, NULL, COP_FLAG_UPDATE) < 0) {
SYSerr(SYS_F_IOCTL, errno);
return 0;
struct digest_ctx *digest_ctx =
(struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
+ if (md == NULL || digest_ctx == NULL)
+ return 0;
if (digest_op(digest_ctx, NULL, 0, md, COP_FLAG_FINAL) < 0) {
SYSerr(SYS_F_IOCTL, errno);
return 0;
}
- if (ioctl(digest_ctx->cfd, CIOCFSESSION, &digest_ctx->sess) < 0) {
+
+ return 1;
+}
+
+static int digest_copy(EVP_MD_CTX *to, const EVP_MD_CTX *from)
+{
+ struct digest_ctx *digest_from =
+ (struct digest_ctx *)EVP_MD_CTX_md_data(from);
+ struct digest_ctx *digest_to =
+ (struct digest_ctx *)EVP_MD_CTX_md_data(to);
+ struct cphash_op cphash;
+
+ if (digest_from == NULL || digest_from->init_called != 1)
+ return 1;
+
+ if (!digest_init(to)) {
SYSerr(SYS_F_IOCTL, errno);
return 0;
}
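+    /*
+     * Ask the kernel to copy the hash state of the source session into the
+     * newly created destination session.
+     */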
+ cphash.src_ses = digest_from->sess.ses;
+ cphash.dst_ses = digest_to->sess.ses;
+ if (ioctl(cfd, CIOCCPHASH, &cphash) < 0) {
+ SYSerr(SYS_F_IOCTL, errno);
+ return 0;
+ }
return 1;
}
struct digest_ctx *digest_ctx =
(struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
- if (close(digest_ctx->cfd) < 0) {
- SYSerr(SYS_F_CLOSE, errno);
+ if (digest_ctx == NULL)
+ return 1;
+ if (ioctl(cfd, CIOCFSESSION, &digest_ctx->sess.ses) < 0) {
+ SYSerr(SYS_F_IOCTL, errno);
return 0;
}
-
return 1;
}
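+/*
+ * Check that the kernel can both hash with the given algorithm and copy
+ * the hash state with CIOCCPHASH, which digest_copy() relies on.
+ */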
+static int devcrypto_test_digest(size_t digest_data_index)
+{
+ struct session_op sess1, sess2;
+ struct cphash_op cphash;
+    int ret = 0;
+
+ memset(&sess1, 0, sizeof(sess1));
+ memset(&sess2, 0, sizeof(sess2));
+ sess1.mac = digest_data[digest_data_index].devcryptoid;
+ if (ioctl(cfd, CIOCGSESSION, &sess1) < 0)
+ return 0;
+    /* Make sure the driver is capable of hash state copying */
+ sess2.mac = sess1.mac;
+ if (ioctl(cfd, CIOCGSESSION, &sess2) >= 0) {
+ cphash.src_ses = sess1.ses;
+ cphash.dst_ses = sess2.ses;
+ if (ioctl(cfd, CIOCCPHASH, &cphash) >= 0)
+ ret = 1;
+ ioctl(cfd, CIOCFSESSION, &sess2.ses);
+ }
+ ioctl(cfd, CIOCFSESSION, &sess1.ses);
+ return ret;
+}
+
/*
* Keep a table of known nids and associated methods.
* Note that known_digest_nids[] isn't necessarily indexed the same way as
static int known_digest_nids_amount = -1; /* -1 indicates not yet initialised */
static EVP_MD *known_digest_methods[OSSL_NELEM(digest_data)] = { NULL, };
-static void prepare_digest_methods()
+static void prepare_digest_methods(void)
{
size_t i;
- struct session_op sess;
- int cfd;
-
- if ((cfd = open("/dev/crypto", O_RDWR, 0)) < 0)
- return;
-
- memset(&sess, 0, sizeof(sess));
for (i = 0, known_digest_nids_amount = 0; i < OSSL_NELEM(digest_data);
i++) {
/*
- * Check that the algo is really availably by trying to open and close
- * a session.
+ * Check that the algo is usable
*/
- sess.mac = digest_data[i].devcryptoid;
- if (ioctl(cfd, CIOCGSESSION, &sess) < 0
- || ioctl(cfd, CIOCFSESSION, &sess) < 0)
+ if (!devcrypto_test_digest(i))
continue;
if ((known_digest_methods[i] = EVP_MD_meth_new(digest_data[i].nid,
|| !EVP_MD_meth_set_init(known_digest_methods[i], digest_init)
|| !EVP_MD_meth_set_update(known_digest_methods[i], digest_update)
|| !EVP_MD_meth_set_final(known_digest_methods[i], digest_final)
+ || !EVP_MD_meth_set_copy(known_digest_methods[i], digest_copy)
|| !EVP_MD_meth_set_cleanup(known_digest_methods[i], digest_cleanup)
|| !EVP_MD_meth_set_app_datasize(known_digest_methods[i],
sizeof(struct digest_ctx))) {
known_digest_nids[known_digest_nids_amount++] = digest_data[i].nid;
}
}
-
- close(cfd);
}
static const EVP_MD *get_digest_method(int nid)
known_digest_methods[i] = NULL;
}
-static void destroy_all_digest_methods()
+static void destroy_all_digest_methods(void)
{
size_t i;
static int devcrypto_unload(ENGINE *e)
{
destroy_all_cipher_methods();
-#if defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
+#ifdef IMPLEMENT_DIGEST
destroy_all_digest_methods();
#endif
+
+ close(cfd);
+
return 1;
}
/*
{
ENGINE *e = NULL;
- if (access("/dev/crypto", R_OK | W_OK) < 0) {
- fprintf(stderr,
- "/dev/crypto not present, not enabling devcrypto engine\n");
+ if ((cfd = open("/dev/crypto", O_RDWR, 0)) < 0) {
+ fprintf(stderr, "Could not open /dev/crypto: %s\n", strerror(errno));
+ return;
+ }
+
+ if ((e = ENGINE_new()) == NULL
+ || !ENGINE_set_destroy_function(e, devcrypto_unload)) {
+ ENGINE_free(e);
+ /*
+         * We know that devcrypto_unload() won't be called when either of the
+         * two calls above fails, so we close cfd explicitly here to avoid
+         * leaking the file descriptor.
+ */
+ close(cfd);
return;
}
prepare_cipher_methods();
-#if defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
+#ifdef IMPLEMENT_DIGEST
prepare_digest_methods();
#endif
- if ((e = ENGINE_new()) == NULL)
- return;
-
if (!ENGINE_set_id(e, "devcrypto")
|| !ENGINE_set_name(e, "/dev/crypto engine")
- || !ENGINE_set_destroy_function(e, devcrypto_unload)
/*
* Asymmetric ciphers aren't well supported with /dev/crypto. Among the BSD
# endif
#endif
|| !ENGINE_set_ciphers(e, devcrypto_ciphers)
-#if defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
+#ifdef IMPLEMENT_DIGEST
|| !ENGINE_set_digests(e, devcrypto_digests)
#endif
) {