/*
 * Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include <string.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>

#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/engine.h>
#include <openssl/objects.h>
#include <crypto/cryptodev.h>

#include "internal/engine.h"
27 #ifdef CRYPTO_ALGORITHM_MIN
28 # define CHECK_BSD_STYLE_MACROS
32 * ONE global file descriptor for all sessions. This allows operations
33 * such as digest session data copying (see digest_copy()), but is also
34 * saner... why re-open /dev/crypto for every session?
/******************************************************************************
 *
 * Ciphers
 *
 * Because they all do the same basic operation, we have only one set of
 * method functions for them all to share, and a mapping table between
 * NIDs and cryptodev IDs, with all the necessary size data.
 *
 *****/
49 struct session_op sess;
50 int op; /* COP_ENCRYPT or COP_DECRYPT */
51 unsigned long mode; /* EVP_CIPH_*_MODE */
53 /* to handle ctr mode being a stream cipher */
54 unsigned char partial[EVP_MAX_BLOCK_LENGTH];
55 unsigned int blocksize, num;
58 static const struct cipher_data_st {
66 #ifndef OPENSSL_NO_DES
67 { NID_des_cbc, 8, 8, 8, EVP_CIPH_CBC_MODE, CRYPTO_DES_CBC },
68 { NID_des_ede3_cbc, 8, 24, 8, EVP_CIPH_CBC_MODE, CRYPTO_3DES_CBC },
71 { NID_bf_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_BLF_CBC },
73 #ifndef OPENSSL_NO_CAST
74 { NID_cast5_cbc, 8, 16, 8, EVP_CIPH_CBC_MODE, CRYPTO_CAST_CBC },
76 { NID_aes_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
77 { NID_aes_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
78 { NID_aes_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC },
79 #ifndef OPENSSL_NO_RC4
80 { NID_rc4, 1, 16, 0, EVP_CIPH_STREAM_CIPHER, CRYPTO_ARC4 },
82 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_CTR)
83 { NID_aes_128_ctr, 16, 128 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
84 { NID_aes_192_ctr, 16, 192 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
85 { NID_aes_256_ctr, 16, 256 / 8, 16, EVP_CIPH_CTR_MODE, CRYPTO_AES_CTR },
87 #if 0 /* Not yet supported */
88 { NID_aes_128_xts, 16, 128 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
89 { NID_aes_256_xts, 16, 256 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS },
91 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_ECB)
92 { NID_aes_128_ecb, 16, 128 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
93 { NID_aes_192_ecb, 16, 192 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
94 { NID_aes_256_ecb, 16, 256 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB },
96 #if 0 /* Not yet supported */
97 { NID_aes_128_gcm, 16, 128 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
98 { NID_aes_192_gcm, 16, 192 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
99 { NID_aes_256_gcm, 16, 256 / 8, 16, EVP_CIPH_GCM_MODE, CRYPTO_AES_GCM },
101 #ifndef OPENSSL_NO_CAMELLIA
102 { NID_camellia_128_cbc, 16, 128 / 8, 16, EVP_CIPH_CBC_MODE,
103 CRYPTO_CAMELLIA_CBC },
104 { NID_camellia_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE,
105 CRYPTO_CAMELLIA_CBC },
106 { NID_camellia_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE,
107 CRYPTO_CAMELLIA_CBC },
111 static size_t get_cipher_data_index(int nid)
115 for (i = 0; i < OSSL_NELEM(cipher_data); i++)
116 if (nid == cipher_data[i].nid)
120 * Code further down must make sure that only NIDs in the table above
121 * are used. If any other NID reaches this function, there's a grave
122 * coding error further down.
124 assert("Code that never should be reached" == NULL);
128 static const struct cipher_data_st *get_cipher_data(int nid)
130 return &cipher_data[get_cipher_data_index(nid)];
/*
 * Following are the three necessary functions to map OpenSSL functionality
 * with cryptodev: init, do_cipher and cleanup.
 */
138 static int cipher_init(EVP_CIPHER_CTX *ctx, const unsigned char *key,
139 const unsigned char *iv, int enc)
141 struct cipher_ctx *cipher_ctx =
142 (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
143 const struct cipher_data_st *cipher_d =
144 get_cipher_data(EVP_CIPHER_CTX_nid(ctx));
146 memset(&cipher_ctx->sess, 0, sizeof(cipher_ctx->sess));
147 cipher_ctx->sess.cipher = cipher_d->devcryptoid;
148 cipher_ctx->sess.keylen = cipher_d->keylen;
149 cipher_ctx->sess.key = (void *)key;
150 cipher_ctx->op = enc ? COP_ENCRYPT : COP_DECRYPT;
151 cipher_ctx->mode = cipher_d->flags & EVP_CIPH_MODE;
152 cipher_ctx->blocksize = cipher_d->blocksize;
153 if (ioctl(cfd, CIOCGSESSION, &cipher_ctx->sess) < 0) {
154 SYSerr(SYS_F_IOCTL, errno);
161 static int cipher_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
162 const unsigned char *in, size_t inl)
164 struct cipher_ctx *cipher_ctx =
165 (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
166 struct crypt_op cryp;
167 unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
168 #if !defined(COP_FLAG_WRITE_IV)
169 unsigned char saved_iv[EVP_MAX_IV_LENGTH];
170 const unsigned char *ivptr;
171 size_t nblocks, ivlen;
174 memset(&cryp, 0, sizeof(cryp));
175 cryp.ses = cipher_ctx->sess.ses;
177 cryp.src = (void *)in;
178 cryp.dst = (void *)out;
179 cryp.iv = (void *)iv;
180 cryp.op = cipher_ctx->op;
181 #if !defined(COP_FLAG_WRITE_IV)
184 ivlen = EVP_CIPHER_CTX_iv_length(ctx);
186 switch (cipher_ctx->mode) {
187 case EVP_CIPH_CBC_MODE:
188 assert(inl >= ivlen);
189 if (!EVP_CIPHER_CTX_encrypting(ctx)) {
190 ivptr = in + inl - ivlen;
191 memcpy(saved_iv, ivptr, ivlen);
195 case EVP_CIPH_CTR_MODE:
198 default: /* should not happen */
202 cryp.flags = COP_FLAG_WRITE_IV;
205 if (ioctl(cfd, CIOCCRYPT, &cryp) < 0) {
206 SYSerr(SYS_F_IOCTL, errno);
210 #if !defined(COP_FLAG_WRITE_IV)
212 switch (cipher_ctx->mode) {
213 case EVP_CIPH_CBC_MODE:
214 assert(inl >= ivlen);
215 if (EVP_CIPHER_CTX_encrypting(ctx))
216 ivptr = out + inl - ivlen;
220 memcpy(iv, ivptr, ivlen);
223 case EVP_CIPH_CTR_MODE:
224 nblocks = (inl + cipher_ctx->blocksize - 1)
225 / cipher_ctx->blocksize;
228 nblocks += iv[ivlen];
229 iv[ivlen] = (uint8_t) nblocks;
234 default: /* should not happen */
242 static int ctr_do_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
243 const unsigned char *in, size_t inl)
245 struct cipher_ctx *cipher_ctx =
246 (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
249 /* initial partial block */
250 while (cipher_ctx->num && inl) {
251 (*out++) = *(in++) ^ cipher_ctx->partial[cipher_ctx->num];
253 cipher_ctx->num = (cipher_ctx->num + 1) % cipher_ctx->blocksize;
257 if (inl > (unsigned int) cipher_ctx->blocksize) {
258 nblocks = inl/cipher_ctx->blocksize;
259 len = nblocks * cipher_ctx->blocksize;
260 if (cipher_do_cipher(ctx, out, in, len) < 1)
267 /* final partial block */
269 memset(cipher_ctx->partial, 0, cipher_ctx->blocksize);
270 if (cipher_do_cipher(ctx, cipher_ctx->partial, cipher_ctx->partial,
271 cipher_ctx->blocksize) < 1)
274 out[cipher_ctx->num] = in[cipher_ctx->num]
275 ^ cipher_ctx->partial[cipher_ctx->num];
283 static int cipher_ctrl(EVP_CIPHER_CTX *ctx, int type, int p1, void* p2)
285 EVP_CIPHER_CTX *to_ctx = (EVP_CIPHER_CTX *)p2;
286 struct cipher_ctx *cipher_ctx;
288 if (type == EVP_CTRL_COPY) {
289 /* when copying the context, a new session needs to be initialized */
290 cipher_ctx = (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
291 return (cipher_ctx == NULL)
292 || cipher_init(to_ctx, cipher_ctx->sess.key, EVP_CIPHER_CTX_iv(ctx),
293 (cipher_ctx->op == COP_ENCRYPT));
299 static int cipher_cleanup(EVP_CIPHER_CTX *ctx)
301 struct cipher_ctx *cipher_ctx =
302 (struct cipher_ctx *)EVP_CIPHER_CTX_get_cipher_data(ctx);
304 if (ioctl(cfd, CIOCFSESSION, &cipher_ctx->sess.ses) < 0) {
305 SYSerr(SYS_F_IOCTL, errno);
313 * Keep a table of known nids and associated methods.
314 * Note that known_cipher_nids[] isn't necessarily indexed the same way as
315 * cipher_data[] above, which known_cipher_methods[] is.
317 static int known_cipher_nids[OSSL_NELEM(cipher_data)];
318 static int known_cipher_nids_amount = -1; /* -1 indicates not yet initialised */
319 static EVP_CIPHER *known_cipher_methods[OSSL_NELEM(cipher_data)] = { NULL, };
321 static void prepare_cipher_methods(void)
324 struct session_op sess;
325 unsigned long cipher_mode;
327 memset(&sess, 0, sizeof(sess));
328 sess.key = (void *)"01234567890123456789012345678901234567890123456789";
330 for (i = 0, known_cipher_nids_amount = 0;
331 i < OSSL_NELEM(cipher_data); i++) {
334 * Check that the algo is really availably by trying to open and close
337 sess.cipher = cipher_data[i].devcryptoid;
338 sess.keylen = cipher_data[i].keylen;
339 if (ioctl(cfd, CIOCGSESSION, &sess) < 0
340 || ioctl(cfd, CIOCFSESSION, &sess.ses) < 0)
343 cipher_mode = cipher_data[i].flags & EVP_CIPH_MODE;
345 if ((known_cipher_methods[i] =
346 EVP_CIPHER_meth_new(cipher_data[i].nid,
347 cipher_mode == EVP_CIPH_CTR_MODE ? 1 :
348 cipher_data[i].blocksize,
349 cipher_data[i].keylen)) == NULL
350 || !EVP_CIPHER_meth_set_iv_length(known_cipher_methods[i],
351 cipher_data[i].ivlen)
352 || !EVP_CIPHER_meth_set_flags(known_cipher_methods[i],
354 | EVP_CIPH_CUSTOM_COPY
355 | EVP_CIPH_FLAG_DEFAULT_ASN1)
356 || !EVP_CIPHER_meth_set_init(known_cipher_methods[i], cipher_init)
357 || !EVP_CIPHER_meth_set_do_cipher(known_cipher_methods[i],
358 cipher_mode == EVP_CIPH_CTR_MODE ?
361 || !EVP_CIPHER_meth_set_ctrl(known_cipher_methods[i], cipher_ctrl)
362 || !EVP_CIPHER_meth_set_cleanup(known_cipher_methods[i],
364 || !EVP_CIPHER_meth_set_impl_ctx_size(known_cipher_methods[i],
365 sizeof(struct cipher_ctx))) {
366 EVP_CIPHER_meth_free(known_cipher_methods[i]);
367 known_cipher_methods[i] = NULL;
369 known_cipher_nids[known_cipher_nids_amount++] =
375 static const EVP_CIPHER *get_cipher_method(int nid)
377 size_t i = get_cipher_data_index(nid);
381 return known_cipher_methods[i];
384 static int get_cipher_nids(const int **nids)
386 *nids = known_cipher_nids;
387 return known_cipher_nids_amount;
390 static void destroy_cipher_method(int nid)
392 size_t i = get_cipher_data_index(nid);
394 EVP_CIPHER_meth_free(known_cipher_methods[i]);
395 known_cipher_methods[i] = NULL;
398 static void destroy_all_cipher_methods(void)
402 for (i = 0; i < OSSL_NELEM(cipher_data); i++)
403 destroy_cipher_method(cipher_data[i].nid);
406 static int devcrypto_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
407 const int **nids, int nid)
410 return get_cipher_nids(nids);
412 *cipher = get_cipher_method(nid);
414 return *cipher != NULL;
/*
 * We only support digests if the cryptodev implementation supports multiple
 * data updates and session copying.  Otherwise, we would be forced to maintain
 * a cache, which is perilous if there's a lot of data coming in (if someone
 * wants to checksum an OpenSSL tarball, for example).
 */
#if defined(CIOCCPHASH) && defined(COP_FLAG_UPDATE) && defined(COP_FLAG_FINAL)
#define IMPLEMENT_DIGEST
/******************************************************************************
 *
 * Digests
 *
 * Because they all do the same basic operation, we have only one set of
 * method functions for them all to share, and a mapping table between
 * NIDs and cryptodev IDs, with all the necessary size data.
 *
 *****/
437 struct session_op sess;
438 /* This signals that the init function was called, not that it succeeded. */
442 static const struct digest_data_st {
447 #ifndef OPENSSL_NO_MD5
448 { NID_md5, 16, CRYPTO_MD5 },
450 { NID_sha1, 20, CRYPTO_SHA1 },
451 #ifndef OPENSSL_NO_RMD160
452 # if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_RIPEMD160)
453 { NID_ripemd160, 20, CRYPTO_RIPEMD160 },
456 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_224)
457 { NID_sha224, 224 / 8, CRYPTO_SHA2_224 },
459 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_256)
460 { NID_sha256, 256 / 8, CRYPTO_SHA2_256 },
462 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_384)
463 { NID_sha384, 384 / 8, CRYPTO_SHA2_384 },
465 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_SHA2_512)
466 { NID_sha512, 512 / 8, CRYPTO_SHA2_512 },
470 static size_t get_digest_data_index(int nid)
474 for (i = 0; i < OSSL_NELEM(digest_data); i++)
475 if (nid == digest_data[i].nid)
479 * Code further down must make sure that only NIDs in the table above
480 * are used. If any other NID reaches this function, there's a grave
481 * coding error further down.
483 assert("Code that never should be reached" == NULL);
487 static const struct digest_data_st *get_digest_data(int nid)
489 return &digest_data[get_digest_data_index(nid)];
/*
 * Following are the four necessary functions to map OpenSSL functionality
 * with cryptodev: init, update, final and cleanup.
 */
497 static int digest_init(EVP_MD_CTX *ctx)
499 struct digest_ctx *digest_ctx =
500 (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
501 const struct digest_data_st *digest_d =
502 get_digest_data(EVP_MD_CTX_type(ctx));
504 digest_ctx->init_called = 1;
506 memset(&digest_ctx->sess, 0, sizeof(digest_ctx->sess));
507 digest_ctx->sess.mac = digest_d->devcryptoid;
508 if (ioctl(cfd, CIOCGSESSION, &digest_ctx->sess) < 0) {
509 SYSerr(SYS_F_IOCTL, errno);
516 static int digest_op(struct digest_ctx *ctx, const void *src, size_t srclen,
517 void *res, unsigned int flags)
519 struct crypt_op cryp;
521 memset(&cryp, 0, sizeof(cryp));
522 cryp.ses = ctx->sess.ses;
524 cryp.src = (void *)src;
528 return ioctl(cfd, CIOCCRYPT, &cryp);
531 static int digest_update(EVP_MD_CTX *ctx, const void *data, size_t count)
533 struct digest_ctx *digest_ctx =
534 (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
539 if (digest_ctx == NULL)
542 if (digest_op(digest_ctx, data, count, NULL, COP_FLAG_UPDATE) < 0) {
543 SYSerr(SYS_F_IOCTL, errno);
550 static int digest_final(EVP_MD_CTX *ctx, unsigned char *md)
552 struct digest_ctx *digest_ctx =
553 (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
555 if (md == NULL || digest_ctx == NULL)
557 if (digest_op(digest_ctx, NULL, 0, md, COP_FLAG_FINAL) < 0) {
558 SYSerr(SYS_F_IOCTL, errno);
565 static int digest_copy(EVP_MD_CTX *to, const EVP_MD_CTX *from)
567 struct digest_ctx *digest_from =
568 (struct digest_ctx *)EVP_MD_CTX_md_data(from);
569 struct digest_ctx *digest_to =
570 (struct digest_ctx *)EVP_MD_CTX_md_data(to);
571 struct cphash_op cphash;
573 if (digest_from == NULL || digest_from->init_called != 1)
576 if (!digest_init(to)) {
577 SYSerr(SYS_F_IOCTL, errno);
581 cphash.src_ses = digest_from->sess.ses;
582 cphash.dst_ses = digest_to->sess.ses;
583 if (ioctl(cfd, CIOCCPHASH, &cphash) < 0) {
584 SYSerr(SYS_F_IOCTL, errno);
590 static int digest_cleanup(EVP_MD_CTX *ctx)
592 struct digest_ctx *digest_ctx =
593 (struct digest_ctx *)EVP_MD_CTX_md_data(ctx);
595 if (digest_ctx == NULL)
597 if (ioctl(cfd, CIOCFSESSION, &digest_ctx->sess.ses) < 0) {
598 SYSerr(SYS_F_IOCTL, errno);
604 static int devcrypto_test_digest(size_t digest_data_index)
606 struct session_op sess1, sess2;
607 struct cphash_op cphash;
610 memset(&sess1, 0, sizeof(sess1));
611 memset(&sess2, 0, sizeof(sess2));
612 sess1.mac = digest_data[digest_data_index].devcryptoid;
613 if (ioctl(cfd, CIOCGSESSION, &sess1) < 0)
615 /* Make sure the driver is capable of hash state copy */
616 sess2.mac = sess1.mac;
617 if (ioctl(cfd, CIOCGSESSION, &sess2) >= 0) {
618 cphash.src_ses = sess1.ses;
619 cphash.dst_ses = sess2.ses;
620 if (ioctl(cfd, CIOCCPHASH, &cphash) >= 0)
622 ioctl(cfd, CIOCFSESSION, &sess2.ses);
624 ioctl(cfd, CIOCFSESSION, &sess1.ses);
629 * Keep a table of known nids and associated methods.
630 * Note that known_digest_nids[] isn't necessarily indexed the same way as
631 * digest_data[] above, which known_digest_methods[] is.
633 static int known_digest_nids[OSSL_NELEM(digest_data)];
634 static int known_digest_nids_amount = -1; /* -1 indicates not yet initialised */
635 static EVP_MD *known_digest_methods[OSSL_NELEM(digest_data)] = { NULL, };
637 static void prepare_digest_methods(void)
641 for (i = 0, known_digest_nids_amount = 0; i < OSSL_NELEM(digest_data);
645 * Check that the algo is usable
647 if (!devcrypto_test_digest(i))
650 if ((known_digest_methods[i] = EVP_MD_meth_new(digest_data[i].nid,
652 || !EVP_MD_meth_set_result_size(known_digest_methods[i],
653 digest_data[i].digestlen)
654 || !EVP_MD_meth_set_init(known_digest_methods[i], digest_init)
655 || !EVP_MD_meth_set_update(known_digest_methods[i], digest_update)
656 || !EVP_MD_meth_set_final(known_digest_methods[i], digest_final)
657 || !EVP_MD_meth_set_copy(known_digest_methods[i], digest_copy)
658 || !EVP_MD_meth_set_cleanup(known_digest_methods[i], digest_cleanup)
659 || !EVP_MD_meth_set_app_datasize(known_digest_methods[i],
660 sizeof(struct digest_ctx))) {
661 EVP_MD_meth_free(known_digest_methods[i]);
662 known_digest_methods[i] = NULL;
664 known_digest_nids[known_digest_nids_amount++] = digest_data[i].nid;
669 static const EVP_MD *get_digest_method(int nid)
671 size_t i = get_digest_data_index(nid);
675 return known_digest_methods[i];
678 static int get_digest_nids(const int **nids)
680 *nids = known_digest_nids;
681 return known_digest_nids_amount;
684 static void destroy_digest_method(int nid)
686 size_t i = get_digest_data_index(nid);
688 EVP_MD_meth_free(known_digest_methods[i]);
689 known_digest_methods[i] = NULL;
692 static void destroy_all_digest_methods(void)
696 for (i = 0; i < OSSL_NELEM(digest_data); i++)
697 destroy_digest_method(digest_data[i].nid);
700 static int devcrypto_digests(ENGINE *e, const EVP_MD **digest,
701 const int **nids, int nid)
704 return get_digest_nids(nids);
706 *digest = get_digest_method(nid);
708 return *digest != NULL;
/******************************************************************************
 *
 * LOADING / UNLOADING
 *
 *****/
719 static int devcrypto_unload(ENGINE *e)
721 destroy_all_cipher_methods();
722 #ifdef IMPLEMENT_DIGEST
723 destroy_all_digest_methods();
/*
 * This engine is always built into libcrypto, so it doesn't offer any
 * ability to be dynamically loadable.
 */
734 void engine_load_devcrypto_int()
738 if ((cfd = open("/dev/crypto", O_RDWR, 0)) < 0) {
739 fprintf(stderr, "Could not open /dev/crypto: %s\n", strerror(errno));
743 if ((e = ENGINE_new()) == NULL
744 || !ENGINE_set_destroy_function(e, devcrypto_unload)) {
747 * We know that devcrypto_unload() won't be called when one of the
748 * above two calls have failed, so we close cfd explicitly here to
749 * avoid leaking resources.
755 prepare_cipher_methods();
756 #ifdef IMPLEMENT_DIGEST
757 prepare_digest_methods();
760 if (!ENGINE_set_id(e, "devcrypto")
761 || !ENGINE_set_name(e, "/dev/crypto engine")
764 * Asymmetric ciphers aren't well supported with /dev/crypto. Among the BSD
765 * implementations, it seems to only exist in FreeBSD, and regarding the
766 * parameters in its crypt_kop, the manual crypto(4) has this to say:
768 * The semantics of these arguments are currently undocumented.
770 * Reading through the FreeBSD source code doesn't give much more than
771 * their CRK_MOD_EXP implementation for ubsec.
773 * It doesn't look much better with cryptodev-linux. They have the crypt_kop
774 * structure as well as the command (CRK_*) in cryptodev.h, but no support
775 * seems to be implemented at all for the moment.
777 * At the time of writing, it seems impossible to write proper support for
778 * FreeBSD's asym features without some very deep knowledge and access to
779 * specific kernel modules.
781 * /Richard Levitte, 2017-05-11
784 # ifndef OPENSSL_NO_RSA
785 || !ENGINE_set_RSA(e, devcrypto_rsa)
787 # ifndef OPENSSL_NO_DSA
788 || !ENGINE_set_DSA(e, devcrypto_dsa)
790 # ifndef OPENSSL_NO_DH
791 || !ENGINE_set_DH(e, devcrypto_dh)
793 # ifndef OPENSSL_NO_EC
794 || !ENGINE_set_EC(e, devcrypto_ec)
797 || !ENGINE_set_ciphers(e, devcrypto_ciphers)
798 #ifdef IMPLEMENT_DIGEST
799 || !ENGINE_set_digests(e, devcrypto_digests)
807 ENGINE_free(e); /* Loose our local reference */