/*
 * Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>

#include <openssl/conf.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/engine.h>
#include <openssl/objects.h>
#include <crypto/cryptodev.h>

#include "internal/engine.h"
/*
 * CRYPTO_ALGORITHM_MIN is only defined by the BSD flavours of cryptodev.h,
 * so its presence tells us which macro set to check for.
 */
#ifdef CRYPTO_ALGORITHM_MIN
# define CHECK_BSD_STYLE_MACROS
#endif

/*
 * ONE global file descriptor for all sessions.  This allows operations
 * such as digest session data copying (see digest_copy()), but is also
 * saner...  why re-open /dev/crypto for every session?
 */
static int cfd;

/* Policy values for use_softdrivers, controlling software-only drivers. */
#define DEVCRYPTO_REQUIRE_ACCELERATED 0 /* require confirmation of acceleration */
#define DEVCRYPTO_USE_SOFTWARE        1 /* allow software drivers */
#define DEVCRYPTO_REJECT_SOFTWARE     2 /* only disallow confirmed software drivers */

#define DEVCRYPTO_DEFAULT_USE_SOFDTRIVERS DEVCRYPTO_REJECT_SOFTWARE
static int use_softdrivers = DEVCRYPTO_DEFAULT_USE_SOFDTRIVERS;
/*
 * cipher/digest status & acceleration definitions
 * Make sure the defaults are set to 0
 */
struct driver_info_st {
    enum devcrypto_status_t {
        DEVCRYPTO_STATUS_UNUSABLE = -1, /* session open failed */
        DEVCRYPTO_STATUS_UNKNOWN = 0, /* not tested yet */
        DEVCRYPTO_STATUS_USABLE = 1 /* algo can be used */
    } status;

    enum devcrypto_accelerated_t {
        DEVCRYPTO_NOT_ACCELERATED = -1, /* software implemented */
        DEVCRYPTO_ACCELERATION_UNKNOWN = 0, /* acceleration support unknown */
        DEVCRYPTO_ACCELERATED = 1 /* hardware accelerated */
    } accelerated;
};
63 /******************************************************************************
67 * Because they all do the same basic operation, we have only one set of
68 * method functions for them all to share, and a mapping table between
69 * NIDs and cryptodev IDs, with all the necessary size data.
74 struct session_op sess;
75 int op; /* COP_ENCRYPT or COP_DECRYPT */
76 unsigned long mode; /* EVP_CIPH_*_MODE */
78 /* to handle ctr mode being a stream cipher */
79 unsigned char partial[EVP_MAX_BLOCK_LENGTH];
80 unsigned int blocksize, num;
83 static const struct cipher_data_st {
91 #ifndef OPENSSL_NO_DES
92 { NID_des_cbc, 8, 8, 8, EVP_CIPH_CBC_MODE, CRYPTO_DES_CBC },
93 { NID_des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, CRYPTO_3DES_CBC },
96 { NID_bf_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_BLF_CBC },
98 #ifndef OPENSSL_NO_CAST
99 { NID_cast5_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_CAST_CBC },
101 { NID_aes_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
102 { NID_aes_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
103 { NID_aes_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
104 #ifndef OPENSSL_NO_RC4
105 { NID_rc4, 1, 16, 0, EVP_CIPH_STREAM_CIPHER, CRYPTO_ARC4 },
107 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_CTR)
108 { NID_aes_128_ctr, 16, 128 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
109 { NID_aes_192_ctr, 16, 192 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
110 { NID_aes_256_ctr, 16, 256 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
112 #if 0 /* Not yet supported */
113 { NID_aes_128_xts, 16, 128 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
114 { NID_aes_256_xts, 16, 256 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
116 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_ECB)
117 { NID_aes_128_ecb, 16, 128 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
118 { NID_aes_192_ecb, 16, 192 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
119 { NID_aes_256_ecb, 16, 256 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
121 #if 0 /* Not yet supported */
122 { NID_aes_128_gcm, 16, 128 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
123 { NID_aes_192_gcm, 16, 192 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
124 { NID_aes_256_gcm, 16, 256 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
126 #ifndef OPENSSL_NO_CAMELLIA
127 { NID_camellia_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE,
128 CRYPTO_CAMELLIA_CBC },
129 { NID_camellia_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE,
130 CRYPTO_CAMELLIA_CBC },
131 { NID_camellia_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE,
132 CRYPTO_CAMELLIA_CBC },
136 static size_t find_cipher_data_index(int nid)
140 for (i = 0; i < OSSL_NELEM(cipher_data); i++)
141 if (nid == cipher_data[i].nid)
146 static size_t get_cipher_data_index(int nid)
148 size_t i = find_cipher_data_index(nid);
154 * Code further down must make sure that only NIDs in the table above
155 * are used. If any other NID reaches this function, there's a grave
156 * coding error further down.
158 assert("Code that never should be reached" == NULL);
162 static const struct cipher_data_st *get_cipher_data(int nid)
164 return &cipher_data[get_cipher_data_index(nid)];
168 * Following are the three necessary functions to map OpenSSL functionality
172 static int cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key,
173 const unsigned char *iv, int enc)
175 struct cipher_ctx *cipher_ctx =
176 (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
177 const struct cipher_data_st *cipher_d =
178 get_cipher_data(EVP_CIPHER_CTX_nid(ctx));
180 memset(&cipher_ctx->sess, 0, sizeof(cipher_ctx->sess));
181 cipher_ctx->sess.cipher = cipher_d->devcryptoid;
182 cipher_ctx->sess.keylen = cipher_d->keylen;
183 cipher_ctx->sess.key = (void *)key;
184 cipher_ctx->op = enc ? COP_ENCRYPT : COP_DECRYPT;
185 cipher_ctx->mode = cipher_d->flags & EVP_CIPH_MODE;
186 cipher_ctx->blocksize = cipher_d->blocksize;
187 if (ioctl(cfd, CIOCGSESSION, &cipher_ctx->sess) < 0) {
188 SYSerr(SYS_F_IOCTL, errno);
195 static int cipher_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
196 const unsigned char *in, size_t inl)
198 struct cipher_ctx *cipher_ctx =
199 (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
200 struct crypt_op cryp;
201 unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
202 #if !defined(COP_FLAG_WRITE_IV)
203 unsigned char saved_iv[EVP_MAX_IV_LENGTH];
204 const unsigned char *ivptr;
205 size_t nblocks, ivlen;
208 memset(&cryp, 0, sizeof(cryp));
209 cryp.ses = cipher_ctx->sess.ses;
211 cryp.src = (void *)in;
212 cryp.dst = (void *)out;
213 cryp.iv = (void *)iv;
214 cryp.op = cipher_ctx->op;
215 #if !defined(COP_FLAG_WRITE_IV)
218 ivlen = EVP_CIPHER_CTX_iv_length(ctx);
220 switch (cipher_ctx->mode) {
221 case EVP_CIPH_CBC_MODE:
222 assert(inl >= ivlen);
223 if (!EVP_CIPHER_CTX_encrypting(ctx)) {
224 ivptr = in + inl - ivlen;
225 memcpy(saved_iv, ivptr, ivlen);
229 case EVP_CIPH_CTR_MODE:
232 default: /* should not happen */
236 cryp.flags = COP_FLAG_WRITE_IV;
239 if (ioctl(cfd, CIOCCRYPT, &cryp) < 0) {
240 SYSerr(SYS_F_IOCTL, errno);
244 #if !defined(COP_FLAG_WRITE_IV)
246 switch (cipher_ctx->mode) {
247 case EVP_CIPH_CBC_MODE:
248 assert(inl >= ivlen);
249 if (EVP_CIPHER_CTX_encrypting(ctx))
250 ivptr = out + inl - ivlen;
254 memcpy(iv, ivptr, ivlen);
257 case EVP_CIPH_CTR_MODE:
258 nblocks = (inl + cipher_ctx->blocksize - 1)
259 / cipher_ctx->blocksize;
262 nblocks += iv[ivlen];
263 iv[ivlen] = (uint8_t) nblocks;
268 default: /* should not happen */
276 static int ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
277 const unsigned char *in, size_t inl)
279 struct cipher_ctx *cipher_ctx =
280 (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
283 /* initial partial block */
284 while (cipher_ctx->num && inl) {
285 (*out++) = *(in++) ^ cipher_ctx->partial[cipher_ctx->num];
287 cipher_ctx->num = (cipher_ctx->num + 1) % cipher_ctx->blocksize;
291 if (inl > (unsigned int) cipher_ctx->blocksize) {
292 nblocks = inl/cipher_ctx->blocksize;
293 len = nblocks * cipher_ctx->blocksize;
294 if (cipher_do_cipher(ctx, out, in, len) < 1)
301 /* final partial block */
303 memset(cipher_ctx->partial, 0, cipher_ctx->blocksize);
304 if (cipher_do_cipher(ctx, cipher_ctx->partial, cipher_ctx->partial,
305 cipher_ctx->blocksize) < 1)
308 out[cipher_ctx->num] = in[cipher_ctx->num]
309 ^ cipher_ctx->partial[cipher_ctx->num];
317 static int cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int p1, void* p2)
319 EVP_CIPHER_CTX *to_ctx = (EVP_CIPHER_CTX *)p2;
320 struct cipher_ctx *cipher_ctx;
322 if (type == EVP_CTRL_COPY) {
323 /* when copying the context, a new session needs to be initialized */
324 cipher_ctx = (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
325 return (cipher_ctx == NULL)
326 || cipher_init(to_ctx, cipher_ctx->sess.key, EVP_CIPHER_CTX_iv(ctx),
327 (cipher_ctx->op == COP_ENCRYPT));
333 static int cipher_cleanup(EVP_CIPHER_CTX *ctx)
335 struct cipher_ctx *cipher_ctx =
336 (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
338 if (ioctl(cfd, CIOCFSESSION, &cipher_ctx->sess.ses) < 0) {
339 SYSerr(SYS_F_IOCTL, errno);
347 * Keep tables of known nids, associated methods, selected ciphers, and driver
349 * Note that known_cipher_nids[] isn't necessarily indexed the same way as
350 * cipher_data[] above, which the other tables are.
352 static int known_cipher_nids[OSSL_NELEM(cipher_data)];
353 static int known_cipher_nids_amount = -1; /* -1 indicates not yet initialised */
354 static EVP_CIPHER *known_cipher_methods[OSSL_NELEM(cipher_data)] = { NULL, };
355 static int selected_ciphers[OSSL_NELEM(cipher_data)];
356 static struct driver_info_st cipher_driver_info[OSSL_NELEM(cipher_data)];
359 static int devcrypto_test_cipher(size_t cipher_data_index)
361 return (cipher_driver_info[cipher_data_index].status == DEVCRYPTO_STATUS_USABLE
362 && selected_ciphers[cipher_data_index] == 1
363 && (cipher_driver_info[cipher_data_index].accelerated
364 == DEVCRYPTO_ACCELERATED
365 || use_softdrivers == DEVCRYPTO_USE_SOFTWARE
366 || (cipher_driver_info[cipher_data_index].accelerated
367 != DEVCRYPTO_NOT_ACCELERATED
368 && use_softdrivers == DEVCRYPTO_REJECT_SOFTWARE)));
371 static void prepare_cipher_methods(void)
374 struct session_op sess;
375 unsigned long cipher_mode;
377 struct session_info_op siop;
380 memset(&cipher_driver_info, 0, sizeof(cipher_driver_info));
382 memset(&sess, 0, sizeof(sess));
383 sess.key = (void *)"01234567890123456789012345678901234567890123456789";
385 for (i = 0, known_cipher_nids_amount = 0;
386 i < OSSL_NELEM(cipher_data); i++) {
388 selected_ciphers[i] = 1;
390 * Check that the cipher is usable
392 sess.cipher = cipher_data[i].devcryptoid;
393 sess.keylen = cipher_data[i].keylen;
394 if (ioctl(cfd, CIOCGSESSION, &sess) < 0) {
395 cipher_driver_info[i].status = DEVCRYPTO_STATUS_UNUSABLE;
399 cipher_mode = cipher_data[i].flags & EVP_CIPH_MODE;
401 if ((known_cipher_methods[i] =
402 EVP_CIPHER_meth_new(cipher_data[i].nid,
403 cipher_mode == EVP_CIPH_CTR_MODE ? 1 :
404 cipher_data[i].blocksize,
405 cipher_data[i].keylen)) == NULL
406 || !EVP_CIPHER_meth_set_iv_length(known_cipher_methods[i],
407 cipher_data[i].ivlen)
408 || !EVP_CIPHER_meth_set_flags(known_cipher_methods[i],
410 | EVP_CIPH_CUSTOM_COPY
411 | EVP_CIPH_FLAG_DEFAULT_ASN1)
412 || !EVP_CIPHER_meth_set_init(known_cipher_methods[i], cipher_init)
413 || !EVP_CIPHER_meth_set_do_cipher(known_cipher_methods[i],
414 cipher_mode == EVP_CIPH_CTR_MODE ?
417 || !EVP_CIPHER_meth_set_ctrl(known_cipher_methods[i], cipher_ctrl)
418 || !EVP_CIPHER_meth_set_cleanup(known_cipher_methods[i],
420 || !EVP_CIPHER_meth_set_impl_ctx_size(known_cipher_methods[i],
421 sizeof(struct cipher_ctx))) {
422 cipher_driver_info[i].status = DEVCRYPTO_STATUS_UNUSABLE;
423 EVP_CIPHER_meth_free(known_cipher_methods[i]);
424 known_cipher_methods[i] = NULL;
426 cipher_driver_info[i].status = DEVCRYPTO_STATUS_USABLE;
429 if (ioctl(cfd, CIOCGSESSINFO, &siop) < 0)
430 cipher_driver_info[i].accelerated = DEVCRYPTO_ACCELERATION_UNKNOWN;
431 else if (!(siop.flags & SIOP_FLAG_KERNEL_DRIVER_ONLY))
432 cipher_driver_info[i].accelerated = DEVCRYPTO_NOT_ACCELERATED;
434 cipher_driver_info[i].accelerated = DEVCRYPTO_ACCELERATED;
435 #endif /* CIOCGSESSINFO */
437 ioctl(cfd, CIOCFSESSION, &sess.ses);
438 if (devcrypto_test_cipher(i)) {
439 known_cipher_nids[known_cipher_nids_amount++] =
445 static void rebuild_known_cipher_nids(ENGINE *e)
449 for (i = 0, known_cipher_nids_amount = 0; i < OSSL_NELEM(cipher_data); i++) {
450 if (devcrypto_test_cipher(i))
451 known_cipher_nids[known_cipher_nids_amount++] = cipher_data[i].nid;
453 ENGINE_unregister_ciphers(e);
454 ENGINE_register_ciphers(e);
457 static const EVP_CIPHER *get_cipher_method(int nid)
459 size_t i = get_cipher_data_index(nid);
463 return known_cipher_methods[i];
466 static int get_cipher_nids(const int **nids)
468 *nids = known_cipher_nids;
469 return known_cipher_nids_amount;
472 static void destroy_cipher_method(int nid)
474 size_t i = get_cipher_data_index(nid);
476 EVP_CIPHER_meth_free(known_cipher_methods[i]);
477 known_cipher_methods[i] = NULL;
480 static void destroy_all_cipher_methods(void)
484 for (i = 0; i < OSSL_NELEM(cipher_data); i++)
485 destroy_cipher_method(cipher_data[i].nid);
488 static int devcrypto_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
489 const int **nids, int nid)
492 return get_cipher_nids(nids);
494 *cipher = get_cipher_method(nid);
496 return *cipher != NULL;
499 static void devcrypto_select_all_ciphers(int *cipher_list)
503 for (i = 0; i < OSSL_NELEM(cipher_data); i++)
507 static int cryptodev_select_cipher_cb(const char *str, int len, void *usr)
509 int *cipher_list = (int *)usr;
511 const EVP_CIPHER *EVP;
516 if (usr == NULL || (name = OPENSSL_strndup(str, len)) == NULL)
518 EVP = EVP_get_cipherbyname(name);
520 fprintf(stderr, "devcrypto: unknown cipher %s\n", name);
521 else if ((i = find_cipher_data_index(EVP_CIPHER_nid(EVP))) != (size_t)-1)
524 fprintf(stderr, "devcrypto: cipher %s not available\n", name);
530 * We only support digests if the cryptodev implementation supports multiple
531 * data updates and session copying. Otherwise, we would be forced to maintain
532 * a cache, which is perilous if there's a lot of data coming in (if someone
533 * wants to checksum an OpenSSL tarball, for example).
535 #if defined(CIOCCPHASH) && defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
536 #define IMPLEMENT_DIGEST
538 /******************************************************************************
542 * Because they all do the same basic operation, we have only one set of
543 * method functions for them all to share, and a mapping table between
544 * NIDs and cryptodev IDs, with all the necessary size data.
549 struct session_op sess;
550 /* This signals that the init function was called, not that it succeeded. */
552 unsigned char digest_res[HASH_MAX_LEN];
555 static const struct digest_data_st {
560 #ifndef OPENSSL_NO_MD5
561 { NID_md5, 16, CRYPTO_MD5 },
563 { NID_sha1, 20, CRYPTO_SHA1 },
564 #ifndef OPENSSL_NO_RMD160
565 # if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_RIPEMD160)
566 { NID_ripemd160, 20, CRYPTO_RIPEMD160 },
569 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_224)
570 { NID_sha224, 224 / 8, CRYPTO_SHA2_224 },
572 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_256)
573 { NID_sha256, 256 / 8, CRYPTO_SHA2_256 },
575 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_384)
576 { NID_sha384, 384 / 8, CRYPTO_SHA2_384 },
578 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_512)
579 { NID_sha512, 512 / 8, CRYPTO_SHA2_512 },
583 static size_t find_digest_data_index(int nid)
587 for (i = 0; i < OSSL_NELEM(digest_data); i++)
588 if (nid == digest_data[i].nid)
593 static size_t get_digest_data_index(int nid)
595 size_t i = find_digest_data_index(nid);
601 * Code further down must make sure that only NIDs in the table above
602 * are used. If any other NID reaches this function, there's a grave
603 * coding error further down.
605 assert("Code that never should be reached" == NULL);
609 static const struct digest_data_st *get_digest_data(int nid)
611 return &digest_data[get_digest_data_index(nid)];
615 * Following are the five necessary functions to map OpenSSL functionality
616 * with cryptodev: init, update, final, cleanup, and copy.
619 static int digest_init(EVP_MD_CTX *ctx)
621 struct digest_ctx *digest_ctx =
622 (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
623 const struct digest_data_st *digest_d =
624 get_digest_data(EVP_MD_CTX_type(ctx));
626 digest_ctx->init_called = 1;
628 memset(&digest_ctx->sess, 0, sizeof(digest_ctx->sess));
629 digest_ctx->sess.mac = digest_d->devcryptoid;
630 if (ioctl(cfd, CIOCGSESSION, &digest_ctx->sess) < 0) {
631 SYSerr(SYS_F_IOCTL, errno);
638 static int digest_op(struct digest_ctx *ctx, const void *src, size_t srclen,
639 void *res, unsigned int flags)
641 struct crypt_op cryp;
643 memset(&cryp, 0, sizeof(cryp));
644 cryp.ses = ctx->sess.ses;
646 cryp.src = (void *)src;
650 return ioctl(cfd, CIOCCRYPT, &cryp);
653 static int digest_update(EVP_MD_CTX *ctx, const void *data, size_t count)
655 struct digest_ctx *digest_ctx =
656 (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
661 if (digest_ctx == NULL)
664 if (EVP_MD_CTX_test_flags(ctx, EVP_MD_CTX_FLAG_ONESHOT)) {
665 if (digest_op(digest_ctx, data, count, digest_ctx->digest_res, 0) >= 0)
667 } else if (digest_op(digest_ctx, data, count, NULL, COP_FLAG_UPDATE) >= 0) {
671 SYSerr(SYS_F_IOCTL, errno);
675 static int digest_final(EVP_MD_CTX *ctx, unsigned char *md)
677 struct digest_ctx *digest_ctx =
678 (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
680 if (md == NULL || digest_ctx == NULL)
683 if (EVP_MD_CTX_test_flags(ctx, EVP_MD_CTX_FLAG_ONESHOT)) {
684 memcpy(md, digest_ctx->digest_res, EVP_MD_CTX_size(ctx));
685 } else if (digest_op(digest_ctx, NULL, 0, md, COP_FLAG_FINAL) < 0) {
686 SYSerr(SYS_F_IOCTL, errno);
693 static int digest_copy(EVP_MD_CTX *to, const EVP_MD_CTX *from)
695 struct digest_ctx *digest_from =
696 (struct digest_ctx *)EVP_MD_CTX_md_data(from);
697 struct digest_ctx *digest_to =
698 (struct digest_ctx *)EVP_MD_CTX_md_data(to);
699 struct cphash_op cphash;
701 if (digest_from == NULL || digest_from->init_called != 1)
704 if (!digest_init(to)) {
705 SYSerr(SYS_F_IOCTL, errno);
709 cphash.src_ses = digest_from->sess.ses;
710 cphash.dst_ses = digest_to->sess.ses;
711 if (ioctl(cfd, CIOCCPHASH, &cphash) < 0) {
712 SYSerr(SYS_F_IOCTL, errno);
718 static int digest_cleanup(EVP_MD_CTX *ctx)
720 struct digest_ctx *digest_ctx =
721 (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
723 if (digest_ctx == NULL)
725 if (ioctl(cfd, CIOCFSESSION, &digest_ctx->sess.ses) < 0) {
726 SYSerr(SYS_F_IOCTL, errno);
733 * Keep tables of known nids, associated methods, selected digests, and
735 * Note that known_digest_nids[] isn't necessarily indexed the same way as
736 * digest_data[] above, which the other tables are.
738 static int known_digest_nids[OSSL_NELEM(digest_data)];
739 static int known_digest_nids_amount = -1; /* -1 indicates not yet initialised */
740 static EVP_MD *known_digest_methods[OSSL_NELEM(digest_data)] = { NULL, };
741 static int selected_digests[OSSL_NELEM(digest_data)];
742 static struct driver_info_st digest_driver_info[OSSL_NELEM(digest_data)];
744 static int devcrypto_test_digest(size_t digest_data_index)
746 return (digest_driver_info[digest_data_index].status == DEVCRYPTO_STATUS_USABLE
747 && selected_digests[digest_data_index] == 1
748 && (digest_driver_info[digest_data_index].accelerated
749 == DEVCRYPTO_ACCELERATED
750 || use_softdrivers == DEVCRYPTO_USE_SOFTWARE
751 || (digest_driver_info[digest_data_index].accelerated
752 != DEVCRYPTO_NOT_ACCELERATED
753 && use_softdrivers == DEVCRYPTO_REJECT_SOFTWARE)));
756 static void rebuild_known_digest_nids(ENGINE *e)
760 for (i = 0, known_digest_nids_amount = 0; i < OSSL_NELEM(digest_data); i++) {
761 if (devcrypto_test_digest(i))
762 known_digest_nids[known_digest_nids_amount++] = digest_data[i].nid;
764 ENGINE_unregister_digests(e);
765 ENGINE_register_digests(e);
768 static void prepare_digest_methods(void)
771 struct session_op sess1, sess2;
773 struct session_info_op siop;
775 struct cphash_op cphash;
777 memset(&digest_driver_info, 0, sizeof(digest_driver_info));
779 memset(&sess1, 0, sizeof(sess1));
780 memset(&sess2, 0, sizeof(sess2));
782 for (i = 0, known_digest_nids_amount = 0; i < OSSL_NELEM(digest_data);
785 selected_digests[i] = 1;
788 * Check that the digest is usable
790 sess1.mac = digest_data[i].devcryptoid;
792 if (ioctl(cfd, CIOCGSESSION, &sess1) < 0) {
793 digest_driver_info[i].status = DEVCRYPTO_STATUS_UNUSABLE;
798 /* gather hardware acceleration info from the driver */
799 siop.ses = sess1.ses;
800 if (ioctl(cfd, CIOCGSESSINFO, &siop) < 0)
801 digest_driver_info[i].accelerated = DEVCRYPTO_ACCELERATION_UNKNOWN;
802 else if (siop.flags & SIOP_FLAG_KERNEL_DRIVER_ONLY)
803 digest_driver_info[i].accelerated = DEVCRYPTO_ACCELERATED;
805 digest_driver_info[i].accelerated = DEVCRYPTO_NOT_ACCELERATED;
808 /* digest must be capable of hash state copy */
809 sess2.mac = sess1.mac;
810 if (ioctl(cfd, CIOCGSESSION, &sess2) < 0) {
811 digest_driver_info[i].status = DEVCRYPTO_STATUS_UNUSABLE;
814 cphash.src_ses = sess1.ses;
815 cphash.dst_ses = sess2.ses;
816 if (ioctl(cfd, CIOCCPHASH, &cphash) < 0) {
817 digest_driver_info[i].status = DEVCRYPTO_STATUS_UNUSABLE;
820 if ((known_digest_methods[i] = EVP_MD_meth_new(digest_data[i].nid,
822 || !EVP_MD_meth_set_result_size(known_digest_methods[i],
823 digest_data[i].digestlen)
824 || !EVP_MD_meth_set_init(known_digest_methods[i], digest_init)
825 || !EVP_MD_meth_set_update(known_digest_methods[i], digest_update)
826 || !EVP_MD_meth_set_final(known_digest_methods[i], digest_final)
827 || !EVP_MD_meth_set_copy(known_digest_methods[i], digest_copy)
828 || !EVP_MD_meth_set_cleanup(known_digest_methods[i], digest_cleanup)
829 || !EVP_MD_meth_set_app_datasize(known_digest_methods[i],
830 sizeof(struct digest_ctx))) {
831 digest_driver_info[i].status = DEVCRYPTO_STATUS_UNUSABLE;
832 EVP_MD_meth_free(known_digest_methods[i]);
833 known_digest_methods[i] = NULL;
836 digest_driver_info[i].status = DEVCRYPTO_STATUS_USABLE;
838 ioctl(cfd, CIOCFSESSION, &sess1.ses);
840 ioctl(cfd, CIOCFSESSION, &sess2.ses);
841 if (devcrypto_test_digest(i))
842 known_digest_nids[known_digest_nids_amount++] = digest_data[i].nid;
846 static const EVP_MD *get_digest_method(int nid)
848 size_t i = get_digest_data_index(nid);
852 return known_digest_methods[i];
855 static int get_digest_nids(const int **nids)
857 *nids = known_digest_nids;
858 return known_digest_nids_amount;
861 static void destroy_digest_method(int nid)
863 size_t i = get_digest_data_index(nid);
865 EVP_MD_meth_free(known_digest_methods[i]);
866 known_digest_methods[i] = NULL;
869 static void destroy_all_digest_methods(void)
873 for (i = 0; i < OSSL_NELEM(digest_data); i++)
874 destroy_digest_method(digest_data[i].nid);
877 static int devcrypto_digests(ENGINE *e, const EVP_MD **digest,
878 const int **nids, int nid)
881 return get_digest_nids(nids);
883 *digest = get_digest_method(nid);
885 return *digest != NULL;
888 static void devcrypto_select_all_digests(int *digest_list)
892 for (i = 0; i < OSSL_NELEM(digest_data); i++)
896 static int cryptodev_select_digest_cb(const char *str, int len, void *usr)
898 int *digest_list = (int *)usr;
905 if (usr == NULL || (name = OPENSSL_strndup(str, len)) == NULL)
907 EVP = EVP_get_digestbyname(name);
909 fprintf(stderr, "devcrypto: unknown digest %s\n", name);
910 else if ((i = find_digest_data_index(EVP_MD_type(EVP))) != (size_t)-1)
913 fprintf(stderr, "devcrypto: digest %s not available\n", name);
920 /******************************************************************************
926 #define DEVCRYPTO_CMD_USE_SOFTDRIVERS ENGINE_CMD_BASE
927 #define DEVCRYPTO_CMD_CIPHERS (ENGINE_CMD_BASE + 1)
928 #define DEVCRYPTO_CMD_DIGESTS (ENGINE_CMD_BASE + 2)
929 #define DEVCRYPTO_CMD_DUMP_INFO (ENGINE_CMD_BASE + 3)
931 static const ENGINE_CMD_DEFN devcrypto_cmds[] = {
933 {DEVCRYPTO_CMD_USE_SOFTDRIVERS,
935 "specifies whether to use software (not accelerated) drivers ("
936 OPENSSL_MSTR(DEVCRYPTO_REQUIRE_ACCELERATED) "=use only accelerated drivers, "
937 OPENSSL_MSTR(DEVCRYPTO_USE_SOFTWARE) "=allow all drivers, "
938 OPENSSL_MSTR(DEVCRYPTO_REJECT_SOFTWARE)
939 "=use if acceleration can't be determined) [default="
940 OPENSSL_MSTR(DEVCRYPTO_DEFAULT_USE_SOFDTRIVERS) "]",
941 ENGINE_CMD_FLAG_NUMERIC},
944 {DEVCRYPTO_CMD_CIPHERS,
946 "either ALL, NONE, or a comma-separated list of ciphers to enable [default=ALL]",
947 ENGINE_CMD_FLAG_STRING},
949 #ifdef IMPLEMENT_DIGEST
950 {DEVCRYPTO_CMD_DIGESTS,
952 "either ALL, NONE, or a comma-separated list of digests to enable [default=ALL]",
953 ENGINE_CMD_FLAG_STRING},
959 static int devcrypto_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f) (void))
964 case DEVCRYPTO_CMD_USE_SOFTDRIVERS:
966 case DEVCRYPTO_REQUIRE_ACCELERATED:
967 case DEVCRYPTO_USE_SOFTWARE:
968 case DEVCRYPTO_REJECT_SOFTWARE:
971 fprintf(stderr, "devcrypto: invalid value (%ld) for USE_SOFTDRIVERS\n", i);
974 if (use_softdrivers == i)
977 #ifdef IMPLEMENT_DIGEST
978 rebuild_known_digest_nids(e);
980 rebuild_known_cipher_nids(e);
982 #endif /* CIOCGSESSINFO */
984 case DEVCRYPTO_CMD_CIPHERS:
987 if (strcasecmp((const char *)p, "ALL") == 0) {
988 devcrypto_select_all_ciphers(selected_ciphers);
989 } else if (strcasecmp((const char*)p, "NONE") == 0) {
990 memset(selected_ciphers, 0, sizeof(selected_ciphers));
992 new_list=OPENSSL_zalloc(sizeof(selected_ciphers));
993 if (!CONF_parse_list(p, ',', 1, cryptodev_select_cipher_cb, new_list)) {
994 OPENSSL_free(new_list);
997 memcpy(selected_ciphers, new_list, sizeof(selected_ciphers));
998 OPENSSL_free(new_list);
1000 rebuild_known_cipher_nids(e);
1003 #ifdef IMPLEMENT_DIGEST
1004 case DEVCRYPTO_CMD_DIGESTS:
1007 if (strcasecmp((const char *)p, "ALL") == 0) {
1008 devcrypto_select_all_digests(selected_digests);
1009 } else if (strcasecmp((const char*)p, "NONE") == 0) {
1010 memset(selected_digests, 0, sizeof(selected_digests));
1012 new_list=OPENSSL_zalloc(sizeof(selected_digests));
1013 if (!CONF_parse_list(p, ',', 1, cryptodev_select_digest_cb, new_list)) {
1014 OPENSSL_free(new_list);
1017 memcpy(selected_digests, new_list, sizeof(selected_digests));
1018 OPENSSL_free(new_list);
1020 rebuild_known_digest_nids(e);
1022 #endif /* IMPLEMENT_DIGEST */
1030 /******************************************************************************
1036 static int devcrypto_unload(ENGINE *e)
1038 destroy_all_cipher_methods();
1039 #ifdef IMPLEMENT_DIGEST
1040 destroy_all_digest_methods();
1048 * This engine is always built into libcrypto, so it doesn't offer any
1049 * ability to be dynamically loadable.
1051 void engine_load_devcrypto_int()
1055 if ((cfd = open("/dev/crypto", O_RDWR, 0)) < 0) {
1056 fprintf(stderr, "Could not open /dev/crypto: %s\n", strerror(errno));
1060 if ((e = ENGINE_new()) == NULL
1061 || !ENGINE_set_destroy_function(e, devcrypto_unload)) {
1064 * We know that devcrypto_unload() won't be called when one of the
1065 * above two calls have failed, so we close cfd explicitly here to
1066 * avoid leaking resources.
1072 prepare_cipher_methods();
1073 #ifdef IMPLEMENT_DIGEST
1074 prepare_digest_methods();
1077 if (!ENGINE_set_id(e, "devcrypto")
1078 || !ENGINE_set_name(e, "/dev/crypto engine")
1079 || !ENGINE_set_cmd_defns(e, devcrypto_cmds)
1080 || !ENGINE_set_ctrl_function(e, devcrypto_ctrl)
1083 * Asymmetric ciphers aren't well supported with /dev/crypto. Among the BSD
1084 * implementations, it seems to only exist in FreeBSD, and regarding the
1085 * parameters in its crypt_kop, the manual crypto(4) has this to say:
1087 * The semantics of these arguments are currently undocumented.
1089 * Reading through the FreeBSD source code doesn't give much more than
1090 * their CRK_MOD_EXP implementation for ubsec.
1092 * It doesn't look much better with cryptodev-linux. They have the crypt_kop
1093 * structure as well as the command (CRK_*) in cryptodev.h, but no support
1094 * seems to be implemented at all for the moment.
1096 * At the time of writing, it seems impossible to write proper support for
1097 * FreeBSD's asym features without some very deep knowledge and access to
1098 * specific kernel modules.
1100 * /Richard Levitte, 2017-05-11
1103 # ifndef OPENSSL_NO_RSA
1104 || !ENGINE_set_RSA(e, devcrypto_rsa)
1106 # ifndef OPENSSL_NO_DSA
1107 || !ENGINE_set_DSA(e, devcrypto_dsa)
1109 # ifndef OPENSSL_NO_DH
1110 || !ENGINE_set_DH(e, devcrypto_dh)
1112 # ifndef OPENSSL_NO_EC
1113 || !ENGINE_set_EC(e, devcrypto_ec)
1116 || !ENGINE_set_ciphers(e, devcrypto_ciphers)
1117 #ifdef IMPLEMENT_DIGEST
1118 || !ENGINE_set_digests(e, devcrypto_digests)
1126 ENGINE_free(e); /* Loose our local reference */