/*
 * Copyright 2015-2022 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
18 #include "internal/e_os.h"
19 #include <openssl/crypto.h>
20 #include <openssl/err.h>
24 #ifndef OPENSSL_NO_SECURE_MEMORY
27 # if defined(WINAPI_FAMILY_PARTITION) \
28 && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
30 * While VirtualLock is available under the app partition (e.g. UWP),
31 * the headers do not define the API. Define it ourselves instead.
37 _In_ LPVOID lpAddress,
44 # if defined(OPENSSL_SYS_UNIX)
47 # include <sys/types.h>
48 # if defined(OPENSSL_SYS_UNIX)
49 # include <sys/mman.h>
50 # if defined(__FreeBSD__)
51 # define MADV_DONTDUMP MADV_NOCORE
53 # if !defined(MAP_CONCEAL)
54 # define MAP_CONCEAL 0
57 # if defined(OPENSSL_SYS_LINUX)
58 # include <sys/syscall.h>
59 # if defined(SYS_mlock2)
60 # include <linux/mman.h>
63 # include <sys/param.h>
65 # include <sys/stat.h>
/* Cleanse (not just zero) secure-heap memory so the compiler can't elide it */
#define CLEAR(p, s) OPENSSL_cleanse(p, s)
#ifndef PAGE_SIZE
# define PAGE_SIZE    4096
#endif
/* Some platforms spell the anonymous-mapping flag MAP_ANONYMOUS instead */
#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#endif
77 #ifndef OPENSSL_NO_SECURE_MEMORY
78 static size_t secure_mem_used;
80 static int secure_mem_initialized;
82 static CRYPTO_RWLOCK *sec_malloc_lock = NULL;
85 * These are the functions that must be implemented by a secure heap (sh).
87 static int sh_init(size_t size, size_t minsize);
88 static void *sh_malloc(size_t size);
89 static void sh_free(void *ptr);
90 static void sh_done(void);
91 static size_t sh_actual_size(char *ptr);
92 static int sh_allocated(const char *ptr);
95 int CRYPTO_secure_malloc_init(size_t size, size_t minsize)
97 #ifndef OPENSSL_NO_SECURE_MEMORY
100 if (!secure_mem_initialized) {
101 sec_malloc_lock = CRYPTO_THREAD_lock_new();
102 if (sec_malloc_lock == NULL)
104 if ((ret = sh_init(size, minsize)) != 0) {
105 secure_mem_initialized = 1;
107 CRYPTO_THREAD_lock_free(sec_malloc_lock);
108 sec_malloc_lock = NULL;
115 #endif /* OPENSSL_NO_SECURE_MEMORY */
118 int CRYPTO_secure_malloc_done(void)
120 #ifndef OPENSSL_NO_SECURE_MEMORY
121 if (secure_mem_used == 0) {
123 secure_mem_initialized = 0;
124 CRYPTO_THREAD_lock_free(sec_malloc_lock);
125 sec_malloc_lock = NULL;
128 #endif /* OPENSSL_NO_SECURE_MEMORY */
132 int CRYPTO_secure_malloc_initialized(void)
134 #ifndef OPENSSL_NO_SECURE_MEMORY
135 return secure_mem_initialized;
138 #endif /* OPENSSL_NO_SECURE_MEMORY */
141 void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
143 #ifndef OPENSSL_NO_SECURE_MEMORY
146 int reason = CRYPTO_R_SECURE_MALLOC_FAILURE;
148 if (!secure_mem_initialized) {
149 return CRYPTO_malloc(num, file, line);
151 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock)) {
152 reason = ERR_R_CRYPTO_LIB;
155 ret = sh_malloc(num);
156 actual_size = ret ? sh_actual_size(ret) : 0;
157 secure_mem_used += actual_size;
158 CRYPTO_THREAD_unlock(sec_malloc_lock);
160 if (ret == NULL && (file != NULL || line != 0)) {
162 ERR_set_debug(file, line, NULL);
163 ERR_set_error(ERR_LIB_CRYPTO, reason, NULL);
167 return CRYPTO_malloc(num, file, line);
168 #endif /* OPENSSL_NO_SECURE_MEMORY */
171 void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
173 #ifndef OPENSSL_NO_SECURE_MEMORY
174 if (secure_mem_initialized)
175 /* CRYPTO_secure_malloc() zeroes allocations when it is implemented */
176 return CRYPTO_secure_malloc(num, file, line);
178 return CRYPTO_zalloc(num, file, line);
181 void CRYPTO_secure_free(void *ptr, const char *file, int line)
183 #ifndef OPENSSL_NO_SECURE_MEMORY
188 if (!CRYPTO_secure_allocated(ptr)) {
189 CRYPTO_free(ptr, file, line);
192 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock))
194 actual_size = sh_actual_size(ptr);
195 CLEAR(ptr, actual_size);
196 secure_mem_used -= actual_size;
198 CRYPTO_THREAD_unlock(sec_malloc_lock);
200 CRYPTO_free(ptr, file, line);
201 #endif /* OPENSSL_NO_SECURE_MEMORY */
204 void CRYPTO_secure_clear_free(void *ptr, size_t num,
205 const char *file, int line)
207 #ifndef OPENSSL_NO_SECURE_MEMORY
212 if (!CRYPTO_secure_allocated(ptr)) {
213 OPENSSL_cleanse(ptr, num);
214 CRYPTO_free(ptr, file, line);
217 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock))
219 actual_size = sh_actual_size(ptr);
220 CLEAR(ptr, actual_size);
221 secure_mem_used -= actual_size;
223 CRYPTO_THREAD_unlock(sec_malloc_lock);
227 OPENSSL_cleanse(ptr, num);
228 CRYPTO_free(ptr, file, line);
229 #endif /* OPENSSL_NO_SECURE_MEMORY */
232 int CRYPTO_secure_allocated(const void *ptr)
234 #ifndef OPENSSL_NO_SECURE_MEMORY
235 if (!secure_mem_initialized)
238 * Only read accesses to the arena take place in sh_allocated() and this
239 * is only changed by the sh_init() and sh_done() calls which are not
240 * locked. Hence, it is safe to make this check without a lock too.
242 return sh_allocated(ptr);
245 #endif /* OPENSSL_NO_SECURE_MEMORY */
248 size_t CRYPTO_secure_used(void)
250 #ifndef OPENSSL_NO_SECURE_MEMORY
251 return secure_mem_used;
254 #endif /* OPENSSL_NO_SECURE_MEMORY */
257 size_t CRYPTO_secure_actual_size(void *ptr)
259 #ifndef OPENSSL_NO_SECURE_MEMORY
262 if (!CRYPTO_THREAD_write_lock(sec_malloc_lock))
264 actual_size = sh_actual_size(ptr);
265 CRYPTO_THREAD_unlock(sec_malloc_lock);
/*
 * SECURE HEAP IMPLEMENTATION
 */
#ifndef OPENSSL_NO_SECURE_MEMORY

/*
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and an attempt to read data out of the secure heap.
 * Free'd memory is zero'd or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator.  We keep areas in a multiple
 * of "sh.minsize" units.  The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes.  The numbers 3 and 7 are all over the
 * place.
 */
/* size_t-typed 1 so the shifts below are done at full width */
#define ONE ((size_t)1)

/* Bit-vector accessors: bit b of table t (8 bits per byte) */
# define TESTBIT(t, b)  (t[(b) >> 3] & (ONE << ((b) & 7)))
# define SETBIT(t, b)   (t[(b) >> 3] |= (ONE << ((b) & 7)))
# define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))

/* Does p point inside the data arena / the freelist bookkeeping array? */
#define WITHIN_ARENA(p) \
    ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
#define WITHIN_FREELIST(p) \
    ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])
305 typedef struct sh_list_st
307 struct sh_list_st *next;
308 struct sh_list_st **p_next;
318 ossl_ssize_t freelist_size;
320 unsigned char *bittable;
321 unsigned char *bitmalloc;
322 size_t bittable_size; /* size in bits */
327 static size_t sh_getlist(char *ptr)
329 ossl_ssize_t list = sh.freelist_size - 1;
330 size_t bit = (sh.arena_size + ptr - sh.arena) / sh.minsize;
332 for (; bit; bit >>= 1, list--) {
333 if (TESTBIT(sh.bittable, bit))
335 OPENSSL_assert((bit & 1) == 0);
342 static int sh_testbit(char *ptr, int list, unsigned char *table)
346 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
347 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
348 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
349 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
350 return TESTBIT(table, bit);
353 static void sh_clearbit(char *ptr, int list, unsigned char *table)
357 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
358 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
359 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
360 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
361 OPENSSL_assert(TESTBIT(table, bit));
362 CLEARBIT(table, bit);
365 static void sh_setbit(char *ptr, int list, unsigned char *table)
369 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
370 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
371 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
372 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
373 OPENSSL_assert(!TESTBIT(table, bit));
377 static void sh_add_to_list(char **list, char *ptr)
381 OPENSSL_assert(WITHIN_FREELIST(list));
382 OPENSSL_assert(WITHIN_ARENA(ptr));
384 temp = (SH_LIST *)ptr;
385 temp->next = *(SH_LIST **)list;
386 OPENSSL_assert(temp->next == NULL || WITHIN_ARENA(temp->next));
387 temp->p_next = (SH_LIST **)list;
389 if (temp->next != NULL) {
390 OPENSSL_assert((char **)temp->next->p_next == list);
391 temp->next->p_next = &(temp->next);
397 static void sh_remove_from_list(char *ptr)
399 SH_LIST *temp, *temp2;
401 temp = (SH_LIST *)ptr;
402 if (temp->next != NULL)
403 temp->next->p_next = temp->p_next;
404 *temp->p_next = temp->next;
405 if (temp->next == NULL)
409 OPENSSL_assert(WITHIN_FREELIST(temp2->p_next) || WITHIN_ARENA(temp2->p_next));
413 static int sh_init(size_t size, size_t minsize)
421 SYSTEM_INFO systemInfo;
424 memset(&sh, 0, sizeof(sh));
426 /* make sure size is a powers of 2 */
427 OPENSSL_assert(size > 0);
428 OPENSSL_assert((size & (size - 1)) == 0);
429 if (size == 0 || (size & (size - 1)) != 0)
432 if (minsize <= sizeof(SH_LIST)) {
433 OPENSSL_assert(sizeof(SH_LIST) <= 65536);
435 * Compute the minimum possible allocation size.
436 * This must be a power of 2 and at least as large as the SH_LIST
439 minsize = sizeof(SH_LIST) - 1;
440 minsize |= minsize >> 1;
441 minsize |= minsize >> 2;
442 if (sizeof(SH_LIST) > 16)
443 minsize |= minsize >> 4;
444 if (sizeof(SH_LIST) > 256)
445 minsize |= minsize >> 8;
448 /* make sure minsize is a powers of 2 */
449 OPENSSL_assert((minsize & (minsize - 1)) == 0);
450 if ((minsize & (minsize - 1)) != 0)
454 sh.arena_size = size;
455 sh.minsize = minsize;
456 sh.bittable_size = (sh.arena_size / sh.minsize) * 2;
458 /* Prevent allocations of size 0 later on */
459 if (sh.bittable_size >> 3 == 0)
462 sh.freelist_size = -1;
463 for (i = sh.bittable_size; i; i >>= 1)
466 sh.freelist = OPENSSL_zalloc(sh.freelist_size * sizeof(char *));
467 OPENSSL_assert(sh.freelist != NULL);
468 if (sh.freelist == NULL)
471 sh.bittable = OPENSSL_zalloc(sh.bittable_size >> 3);
472 OPENSSL_assert(sh.bittable != NULL);
473 if (sh.bittable == NULL)
476 sh.bitmalloc = OPENSSL_zalloc(sh.bittable_size >> 3);
477 OPENSSL_assert(sh.bitmalloc != NULL);
478 if (sh.bitmalloc == NULL)
481 /* Allocate space for heap, and two extra pages as guards */
482 #if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
484 # if defined(_SC_PAGE_SIZE)
485 long tmppgsize = sysconf(_SC_PAGE_SIZE);
487 long tmppgsize = sysconf(_SC_PAGESIZE);
492 pgsize = (size_t)tmppgsize;
494 #elif defined(_WIN32)
495 GetSystemInfo(&systemInfo);
496 pgsize = (size_t)systemInfo.dwPageSize;
500 sh.map_size = pgsize + sh.arena_size + pgsize;
504 sh.map_result = mmap(NULL, sh.map_size,
505 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE|MAP_CONCEAL, -1, 0);
510 sh.map_result = MAP_FAILED;
511 if ((fd = open("/dev/zero", O_RDWR)) >= 0) {
512 sh.map_result = mmap(NULL, sh.map_size,
513 PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
518 if (sh.map_result == MAP_FAILED)
521 sh.map_result = VirtualAlloc(NULL, sh.map_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
523 if (sh.map_result == NULL)
527 sh.arena = (char *)(sh.map_result + pgsize);
528 sh_setbit(sh.arena, 0, sh.bittable);
529 sh_add_to_list(&sh.freelist[0], sh.arena);
531 /* Now try to add guard pages and lock into memory. */
535 /* Starting guard is already aligned from mmap. */
536 if (mprotect(sh.map_result, pgsize, PROT_NONE) < 0)
539 if (VirtualProtect(sh.map_result, pgsize, PAGE_NOACCESS, &flOldProtect) == FALSE)
543 /* Ending guard page - need to round up to page boundary */
544 aligned = (pgsize + sh.arena_size + (pgsize - 1)) & ~(pgsize - 1);
546 if (mprotect(sh.map_result + aligned, pgsize, PROT_NONE) < 0)
549 if (VirtualProtect(sh.map_result + aligned, pgsize, PAGE_NOACCESS, &flOldProtect) == FALSE)
553 #if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
554 if (syscall(SYS_mlock2, sh.arena, sh.arena_size, MLOCK_ONFAULT) < 0) {
555 if (errno == ENOSYS) {
556 if (mlock(sh.arena, sh.arena_size) < 0)
562 #elif defined(_WIN32)
563 if (VirtualLock(sh.arena, sh.arena_size) == FALSE)
566 if (mlock(sh.arena, sh.arena_size) < 0)
570 if (madvise(sh.arena, sh.arena_size, MADV_DONTDUMP) < 0)
581 static void sh_done(void)
583 OPENSSL_free(sh.freelist);
584 OPENSSL_free(sh.bittable);
585 OPENSSL_free(sh.bitmalloc);
587 if (sh.map_result != MAP_FAILED && sh.map_size)
588 munmap(sh.map_result, sh.map_size);
590 if (sh.map_result != NULL && sh.map_size)
591 VirtualFree(sh.map_result, 0, MEM_RELEASE);
593 memset(&sh, 0, sizeof(sh));
/* 1 iff ptr points into the secure arena, 0 otherwise. */
static int sh_allocated(const char *ptr)
{
    return WITHIN_ARENA(ptr) ? 1 : 0;
}
601 static char *sh_find_my_buddy(char *ptr, int list)
606 bit = (ONE << list) + (ptr - sh.arena) / (sh.arena_size >> list);
609 if (TESTBIT(sh.bittable, bit) && !TESTBIT(sh.bitmalloc, bit))
610 chunk = sh.arena + ((bit & ((ONE << list) - 1)) * (sh.arena_size >> list));
615 static void *sh_malloc(size_t size)
617 ossl_ssize_t list, slist;
621 if (size > sh.arena_size)
624 list = sh.freelist_size - 1;
625 for (i = sh.minsize; i < size; i <<= 1)
630 /* try to find a larger entry to split */
631 for (slist = list; slist >= 0; slist--)
632 if (sh.freelist[slist] != NULL)
637 /* split larger entry */
638 while (slist != list) {
639 char *temp = sh.freelist[slist];
641 /* remove from bigger list */
642 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
643 sh_clearbit(temp, slist, sh.bittable);
644 sh_remove_from_list(temp);
645 OPENSSL_assert(temp != sh.freelist[slist]);
647 /* done with bigger list */
650 /* add to smaller list */
651 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
652 sh_setbit(temp, slist, sh.bittable);
653 sh_add_to_list(&sh.freelist[slist], temp);
654 OPENSSL_assert(sh.freelist[slist] == temp);
657 temp += sh.arena_size >> slist;
658 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
659 sh_setbit(temp, slist, sh.bittable);
660 sh_add_to_list(&sh.freelist[slist], temp);
661 OPENSSL_assert(sh.freelist[slist] == temp);
663 OPENSSL_assert(temp-(sh.arena_size >> slist) == sh_find_my_buddy(temp, slist));
666 /* peel off memory to hand back */
667 chunk = sh.freelist[list];
668 OPENSSL_assert(sh_testbit(chunk, list, sh.bittable));
669 sh_setbit(chunk, list, sh.bitmalloc);
670 sh_remove_from_list(chunk);
672 OPENSSL_assert(WITHIN_ARENA(chunk));
674 /* zero the free list header as a precaution against information leakage */
675 memset(chunk, 0, sizeof(SH_LIST));
680 static void sh_free(void *ptr)
687 OPENSSL_assert(WITHIN_ARENA(ptr));
688 if (!WITHIN_ARENA(ptr))
691 list = sh_getlist(ptr);
692 OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
693 sh_clearbit(ptr, list, sh.bitmalloc);
694 sh_add_to_list(&sh.freelist[list], ptr);
696 /* Try to coalesce two adjacent free areas. */
697 while ((buddy = sh_find_my_buddy(ptr, list)) != NULL) {
698 OPENSSL_assert(ptr == sh_find_my_buddy(buddy, list));
699 OPENSSL_assert(ptr != NULL);
700 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
701 sh_clearbit(ptr, list, sh.bittable);
702 sh_remove_from_list(ptr);
703 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
704 sh_clearbit(buddy, list, sh.bittable);
705 sh_remove_from_list(buddy);
709 /* Zero the higher addressed block's free list pointers */
710 memset(ptr > buddy ? ptr : buddy, 0, sizeof(SH_LIST));
714 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
715 sh_setbit(ptr, list, sh.bittable);
716 sh_add_to_list(&sh.freelist[list], ptr);
717 OPENSSL_assert(sh.freelist[list] == ptr);
721 static size_t sh_actual_size(char *ptr)
725 OPENSSL_assert(WITHIN_ARENA(ptr));
726 if (!WITHIN_ARENA(ptr))
728 list = sh_getlist(ptr);
729 OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
730 return sh.arena_size / (ONE << list);
732 #endif /* OPENSSL_NO_SECURE_MEMORY */