2 * Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
3 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
5 * Licensed under the Apache License 2.0 (the "License"). You may not use
6 * this file except in compliance with the License. You can obtain a copy
7 * in the file LICENSE in the source distribution or at
8 * https://www.openssl.org/source/license.html
12 * This file is in two halves. The first half implements the public API
13 * to be used by external consumers, and to be used by OpenSSL to store
14 * data in a "secure arena." The second half implements the secure arena.
15 * For details on that implementation, see below (look for uppercase
16 * "SECURE HEAP IMPLEMENTATION").
19 #include <openssl/crypto.h>
23 #ifndef OPENSSL_NO_SECURE_MEMORY
29 # if defined(OPENSSL_SYS_UNIX)
32 # include <sys/types.h>
33 # if defined(OPENSSL_SYS_UNIX)
34 # include <sys/mman.h>
35 # if defined(__FreeBSD__)
36 # define MADV_DONTDUMP MADV_NOCORE
38 # if !defined(MAP_CONCEAL)
39 # define MAP_CONCEAL 0
42 # if defined(OPENSSL_SYS_LINUX)
43 # include <sys/syscall.h>
44 # if defined(SYS_mlock2)
45 # include <linux/mman.h>
48 # include <sys/param.h>
50 # include <sys/stat.h>
54 #define CLEAR(p, s) OPENSSL_cleanse(p, s)
56 # define PAGE_SIZE 4096
58 #if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
59 # define MAP_ANON MAP_ANONYMOUS
62 #ifndef OPENSSL_NO_SECURE_MEMORY
/* Bytes currently handed out from the secure heap (maintained under lock). */
63 static size_t secure_mem_used;
/* Nonzero once the secure heap has been successfully initialized. */
65 static int secure_mem_initialized;
/* Lock serializing all secure-heap operations and the counters above. */
67 static CRYPTO_RWLOCK *sec_malloc_lock = NULL;
70  * These are the functions that must be implemented by a secure heap (sh).
/* Forward declarations of the secure-heap ("sh") backend implemented below. */
72 static int sh_init(size_t size, size_t minsize);
73 static void *sh_malloc(size_t size);
74 static void sh_free(void *ptr);
75 static void sh_done(void);
76 static size_t sh_actual_size(char *ptr);
77 static int sh_allocated(const char *ptr);
/*
 * Public API: create the secure heap of |size| bytes with minimum
 * allocation unit |minsize|.  Idempotent in the sense that a second call
 * while already initialized does not re-create the heap.
 * NOTE(review): error paths (failed lock creation / failed sh_init) appear
 * truncated in this extract — confirm against the full source.
 */
80 int CRYPTO_secure_malloc_init(size_t size, size_t minsize)
82 #ifndef OPENSSL_NO_SECURE_MEMORY
85 if (!secure_mem_initialized) {
/* The lock must exist before the heap is published to other threads. */
86 sec_malloc_lock = CRYPTO_THREAD_lock_new();
87 if (sec_malloc_lock == NULL)
/* sh_init() returns nonzero on success; only then mark the heap live. */
89 if ((ret = sh_init(size, minsize)) != 0) {
90 secure_mem_initialized = 1;
/* On sh_init() failure, tear down the lock so state stays consistent. */
92 CRYPTO_THREAD_lock_free(sec_malloc_lock);
93 sec_malloc_lock = NULL;
100 #endif /* OPENSSL_NO_SECURE_MEMORY */
/*
 * Public API: tear down the secure heap.  Refuses (leaves the heap alone)
 * while any secure allocation is still outstanding, to avoid unmapping
 * memory that callers still hold pointers into.
 */
103 int CRYPTO_secure_malloc_done(void)
105 #ifndef OPENSSL_NO_SECURE_MEMORY
/* Only safe to unmap when nothing is allocated from the arena. */
106 if (secure_mem_used == 0) {
108 secure_mem_initialized = 0;
109 CRYPTO_THREAD_lock_free(sec_malloc_lock);
110 sec_malloc_lock = NULL;
113 #endif /* OPENSSL_NO_SECURE_MEMORY */
/* Public API: report whether the secure heap is currently available. */
117 int CRYPTO_secure_malloc_initialized(void)
119 #ifndef OPENSSL_NO_SECURE_MEMORY
120 return secure_mem_initialized;
123 #endif /* OPENSSL_NO_SECURE_MEMORY */
/*
 * Public API: allocate |num| bytes from the secure heap.  Falls back to
 * ordinary CRYPTO_malloc() when the secure heap is not initialized (and,
 * per the visible trailing fallback, presumably when sh_malloc() fails —
 * confirm against the full source).
 */
126 void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
128 #ifndef OPENSSL_NO_SECURE_MEMORY
/* No secure heap: behave exactly like the normal allocator. */
132 if (!secure_mem_initialized) {
133 return CRYPTO_malloc(num, file, line);
/* Whole-heap write lock: the buddy allocator mutates shared state. */
135 CRYPTO_THREAD_write_lock(sec_malloc_lock);
136 ret = sh_malloc(num);
/* Account the rounded-up chunk size, not the requested size. */
137 actual_size = ret ? sh_actual_size(ret) : 0;
138 secure_mem_used += actual_size;
139 CRYPTO_THREAD_unlock(sec_malloc_lock);
142 return CRYPTO_malloc(num, file, line);
143 #endif /* OPENSSL_NO_SECURE_MEMORY */
/*
 * Public API: zero-filled secure allocation.  Secure-heap chunks come back
 * already cleansed, so plain CRYPTO_secure_malloc() suffices when the heap
 * is live; otherwise fall back to CRYPTO_zalloc().
 */
146 void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
148 #ifndef OPENSSL_NO_SECURE_MEMORY
149 if (secure_mem_initialized)
150 /* CRYPTO_secure_malloc() zeroes allocations when it is implemented */
151 return CRYPTO_secure_malloc(num, file, line);
153 return CRYPTO_zalloc(num, file, line);
/*
 * Public API: free a pointer that may or may not live in the secure arena.
 * Secure-arena pointers are cleansed before release and the usage counter
 * is decremented; anything else is routed to the ordinary CRYPTO_free().
 */
156 void CRYPTO_secure_free(void *ptr, const char *file, int line)
158 #ifndef OPENSSL_NO_SECURE_MEMORY
/* Not ours: delegate to the regular heap free. */
163 if (!CRYPTO_secure_allocated(ptr)) {
164 CRYPTO_free(ptr, file, line);
167 CRYPTO_THREAD_write_lock(sec_malloc_lock);
168 actual_size = sh_actual_size(ptr);
/* Scrub the full chunk (CLEAR == OPENSSL_cleanse) before returning it. */
169 CLEAR(ptr, actual_size);
170 secure_mem_used -= actual_size;
172 CRYPTO_THREAD_unlock(sec_malloc_lock);
/* NOTE(review): this trailing CRYPTO_free() looks like the non-secure
 * fallthrough path in the full source — confirm; secure chunks must not
 * reach the ordinary free(). */
174 CRYPTO_free(ptr, file, line);
175 #endif /* OPENSSL_NO_SECURE_MEMORY */
/*
 * Public API: like CRYPTO_secure_free() but also guarantees |num| bytes
 * are cleansed even for pointers outside the secure arena (where the
 * allocator cannot know the size itself).
 */
178 void CRYPTO_secure_clear_free(void *ptr, size_t num,
179                               const char *file, int line)
181 #ifndef OPENSSL_NO_SECURE_MEMORY
/* Non-arena pointer: caller-supplied length is the only size we have. */
186 if (!CRYPTO_secure_allocated(ptr)) {
187 OPENSSL_cleanse(ptr, num);
188 CRYPTO_free(ptr, file, line);
191 CRYPTO_THREAD_write_lock(sec_malloc_lock);
/* Arena pointer: cleanse the whole rounded-up chunk, not just |num|. */
192 actual_size = sh_actual_size(ptr);
193 CLEAR(ptr, actual_size);
194 secure_mem_used -= actual_size;
196 CRYPTO_THREAD_unlock(sec_malloc_lock);
/* NOTE(review): trailing cleanse+free appears to be the fallback path in
 * the full source (secure-memory disabled or heap not used) — confirm. */
200 OPENSSL_cleanse(ptr, num);
201 CRYPTO_free(ptr, file, line);
202 #endif /* OPENSSL_NO_SECURE_MEMORY */
/*
 * Public API: return 1 iff |ptr| lies inside the secure arena.
 * Always 0 when the secure heap is not initialized.
 */
205 int CRYPTO_secure_allocated(const void *ptr)
207 #ifndef OPENSSL_NO_SECURE_MEMORY
210 if (!secure_mem_initialized)
/* Write lock taken even for this read-only query — matches the file's
 * convention of serializing every sh_* call. */
212 CRYPTO_THREAD_write_lock(sec_malloc_lock);
213 ret = sh_allocated(ptr);
214 CRYPTO_THREAD_unlock(sec_malloc_lock);
218 #endif /* OPENSSL_NO_SECURE_MEMORY */
/*
 * Public API: bytes currently allocated from the secure heap.
 * Read without taking sec_malloc_lock — presumably a tolerated
 * approximate/racy read; confirm against the full source.
 */
221 size_t CRYPTO_secure_used(void)
223 #ifndef OPENSSL_NO_SECURE_MEMORY
224 return secure_mem_used;
227 #endif /* OPENSSL_NO_SECURE_MEMORY */
/*
 * Public API: the rounded-up (power-of-two chunk) size backing |ptr|,
 * which may exceed the size originally requested.
 */
230 size_t CRYPTO_secure_actual_size(void *ptr)
232 #ifndef OPENSSL_NO_SECURE_MEMORY
235 CRYPTO_THREAD_write_lock(sec_malloc_lock);
236 actual_size = sh_actual_size(ptr);
237 CRYPTO_THREAD_unlock(sec_malloc_lock);
245 * SECURE HEAP IMPLEMENTATION
247 #ifndef OPENSSL_NO_SECURE_MEMORY
251  * The implementation provided here uses a fixed-sized mmap() heap,
252  * which is locked into memory, not written to core files, and protected
253  * on either side by an unmapped page, which will catch pointer overruns
254  * (or underruns) and an attempt to read data out of the secure heap.
255  * Free'd memory is zero'd or otherwise cleansed.
257  * This is a pretty standard buddy allocator. We keep areas in a multiple
258  * of "sh.minsize" units. The freelist and bitmaps are kept separately,
259  * so all (and only) data is kept in the mmap'd heap.
261  * This code assumes eight-bit bytes. The numbers 3 and 7 are all over the
265 #define ONE ((size_t)1)
/* Bit-array accessors for the chunk bitmaps; >>3 and &7 select the byte
 * and the bit within it (eight-bit bytes assumed, per the note above). */
267 # define TESTBIT(t, b) (t[(b) >> 3] & (ONE << ((b) & 7)))
268 # define SETBIT(t, b) (t[(b) >> 3] |= (ONE << ((b) & 7)))
269 # define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))
/* Sanity predicates: does a pointer land inside the data arena, or inside
 * the freelist head array? */
271 #define WITHIN_ARENA(p) \
272 ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
273 #define WITHIN_FREELIST(p) \
274 ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])
/* Free-chunk header stored inside each free chunk: forward link plus a
 * pointer back to whatever pointed at this chunk (list head or previous
 * node's next), enabling O(1) unlink. */
277 typedef struct sh_list_st
279     struct sh_list_st *next;
280     struct sh_list_st **p_next;
/* Fields below belong to the global heap descriptor "sh"; its full
 * declaration is only partly visible in this extract. */
290     ossl_ssize_t freelist_size;
/* bittable marks chunk boundaries; bitmalloc marks allocated chunks. */
292     unsigned char *bittable;
293     unsigned char *bitmalloc;
294     size_t bittable_size; /* size in bits */
/*
 * Map an arena pointer to the freelist index (buddy level) of the chunk
 * containing it, by walking up the bittable from the smallest-unit bit
 * position until a set chunk-boundary bit is found.
 */
299 static size_t sh_getlist(char *ptr)
301 ossl_ssize_t list = sh.freelist_size - 1;
/* Bit index of |ptr| at the deepest level: offset in minsize units,
 * biased by arena_size/minsize (the ONE<<level term). */
302 size_t bit = (sh.arena_size + ptr - sh.arena) / sh.minsize;
304 for (; bit; bit >>= 1, list--) {
305 if (TESTBIT(sh.bittable, bit))
/* A boundary can only sit on an even bit at the level below it. */
307 OPENSSL_assert((bit & 1) == 0);
/*
 * Test the bit for the chunk at |ptr| on buddy level |list| in |table|
 * (bittable or bitmalloc), after sanity-checking level and alignment.
 */
314 static int sh_testbit(char *ptr, int list, unsigned char *table)
318 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
/* ptr must be aligned to the chunk size of this level. */
319 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
/* Heap-style indexing: 2^list entries per level, level-local offset added. */
320 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
321 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
322 return TESTBIT(table, bit);
/*
 * Clear the bit for the chunk at |ptr| on level |list| in |table|;
 * asserts the bit was previously set (catches double-clear bugs).
 */
325 static void sh_clearbit(char *ptr, int list, unsigned char *table)
329 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
330 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
331 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
332 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
333 OPENSSL_assert(TESTBIT(table, bit));
334 CLEARBIT(table, bit);
/*
 * Set the bit for the chunk at |ptr| on level |list| in |table|;
 * asserts the bit was previously clear (catches double-set bugs).
 * NOTE(review): the SETBIT() call itself is not visible in this extract —
 * it follows the final assert in the full source.
 */
337 static void sh_setbit(char *ptr, int list, unsigned char *table)
341 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
342 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
343 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
344 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
345 OPENSSL_assert(!TESTBIT(table, bit));
/*
 * Push the free chunk at |ptr| onto the freelist headed at |list|.
 * The SH_LIST header is written into the chunk itself; p_next back-links
 * are fixed up so removal from the middle is O(1).
 */
349 static void sh_add_to_list(char **list, char *ptr)
353 OPENSSL_assert(WITHIN_FREELIST(list));
354 OPENSSL_assert(WITHIN_ARENA(ptr));
/* Reuse the chunk's own storage as the list node. */
356 temp = (SH_LIST *)ptr;
357 temp->next = *(SH_LIST **)list;
358 OPENSSL_assert(temp->next == NULL || WITHIN_ARENA(temp->next));
359 temp->p_next = (SH_LIST **)list;
361 if (temp->next != NULL) {
/* The old head's back-pointer must have pointed at the list head. */
362 OPENSSL_assert((char **)temp->next->p_next == list);
363 temp->next->p_next = &(temp->next);
/*
 * Unlink the free chunk at |ptr| from whatever freelist it is on, using
 * its embedded SH_LIST header; O(1) thanks to the p_next back-pointer.
 */
369 static void sh_remove_from_list(char *ptr)
371 SH_LIST *temp, *temp2;
373 temp = (SH_LIST *)ptr;
374 if (temp->next != NULL)
375 temp->next->p_next = temp->p_next;
/* Splice this node out: whoever pointed at us now points at our next. */
376 *temp->p_next = temp->next;
377 if (temp->next == NULL)
/* Consistency check on the successor's back-pointer. */
381 OPENSSL_assert(WITHIN_FREELIST(temp2->p_next) || WITHIN_ARENA(temp2->p_next));
/*
 * Build the secure heap: validate sizes, allocate the freelist heads and
 * the two chunk bitmaps, mmap/VirtualAlloc the arena with a guard page on
 * each side, then lock it in RAM and exclude it from core dumps.
 * Returns nonzero on success (per CRYPTO_secure_malloc_init's usage).
 * NOTE(review): several error-path and #else lines are not visible in
 * this extract — confirm cleanup behavior against the full source.
 */
385 static int sh_init(size_t size, size_t minsize)
393 SYSTEM_INFO systemInfo;
396 memset(&sh, 0, sizeof(sh));
398 /* make sure size is a power of 2 */
399 OPENSSL_assert(size > 0);
400 OPENSSL_assert((size & (size - 1)) == 0);
401 if (size == 0 || (size & (size - 1)) != 0)
404 if (minsize <= sizeof(SH_LIST)) {
405 OPENSSL_assert(sizeof(SH_LIST) <= 65536);
407  * Compute the minimum possible allocation size.
408  * This must be a power of 2 and at least as large as the SH_LIST
/* Classic round-up-to-power-of-two bit smear on sizeof(SH_LIST)-1. */
411 minsize = sizeof(SH_LIST) - 1;
412 minsize |= minsize >> 1;
413 minsize |= minsize >> 2;
414 if (sizeof(SH_LIST) > 16)
415 minsize |= minsize >> 4;
416 if (sizeof(SH_LIST) > 256)
417 minsize |= minsize >> 8;
420 /* make sure minsize is a power of 2 */
421 OPENSSL_assert((minsize & (minsize - 1)) == 0);
422 if ((minsize & (minsize - 1)) != 0)
426 sh.arena_size = size;
427 sh.minsize = minsize;
/* Complete binary tree of chunk bits needs 2x the leaf count. */
428 sh.bittable_size = (sh.arena_size / sh.minsize) * 2;
430 /* Prevent allocations of size 0 later on */
431 if (sh.bittable_size >> 3 == 0)
/* freelist_size = log2(bittable_size): one freelist per buddy level. */
434 sh.freelist_size = -1;
435 for (i = sh.bittable_size; i; i >>= 1)
438 sh.freelist = OPENSSL_zalloc(sh.freelist_size * sizeof(char *));
439 OPENSSL_assert(sh.freelist != NULL);
440 if (sh.freelist == NULL)
443 sh.bittable = OPENSSL_zalloc(sh.bittable_size >> 3);
444 OPENSSL_assert(sh.bittable != NULL);
445 if (sh.bittable == NULL)
448 sh.bitmalloc = OPENSSL_zalloc(sh.bittable_size >> 3);
449 OPENSSL_assert(sh.bitmalloc != NULL);
450 if (sh.bitmalloc == NULL)
453 /* Allocate space for heap, and two extra pages as guards */
454 #if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
456 # if defined(_SC_PAGE_SIZE)
457 long tmppgsize = sysconf(_SC_PAGE_SIZE);
459 long tmppgsize = sysconf(_SC_PAGESIZE);
/* sysconf() can fail/return nonsense; fall back handled in full source. */
464 pgsize = (size_t)tmppgsize;
466 #elif defined(_WIN32)
467 GetSystemInfo(&systemInfo);
468 pgsize = (size_t)systemInfo.dwPageSize;
/* Layout: [guard page][arena][guard page]. */
472 sh.map_size = pgsize + sh.arena_size + pgsize;
/* Preferred path: anonymous private mapping, concealed where supported. */
476 sh.map_result = mmap(NULL, sh.map_size,
477 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE|MAP_CONCEAL, -1, 0);
/* Fallback for platforms without MAP_ANON: map /dev/zero instead. */
482 sh.map_result = MAP_FAILED;
483 if ((fd = open("/dev/zero", O_RDWR)) >= 0) {
484 sh.map_result = mmap(NULL, sh.map_size,
485 PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
490 if (sh.map_result == MAP_FAILED)
/* Windows path. */
493 sh.map_result = VirtualAlloc(NULL, sh.map_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
495 if (sh.map_result == NULL)
/* Arena begins just past the leading guard page; seed level 0 with the
 * whole arena as one free chunk. */
499 sh.arena = (char *)(sh.map_result + pgsize);
500 sh_setbit(sh.arena, 0, sh.bittable);
501 sh_add_to_list(&sh.freelist[0], sh.arena);
503 /* Now try to add guard pages and lock into memory. */
507 /* Starting guard is already aligned from mmap. */
508 if (mprotect(sh.map_result, pgsize, PROT_NONE) < 0)
511 if (VirtualProtect(sh.map_result, pgsize, PAGE_NOACCESS, &flOldProtect) == FALSE)
515 /* Ending guard page - need to round up to page boundary */
516 aligned = (pgsize + sh.arena_size + (pgsize - 1)) & ~(pgsize - 1);
518 if (mprotect(sh.map_result + aligned, pgsize, PROT_NONE) < 0)
521 if (VirtualProtect(sh.map_result + aligned, pgsize, PAGE_NOACCESS, &flOldProtect) == FALSE)
/* Prefer mlock2(MLOCK_ONFAULT) on Linux (pages locked as touched);
 * fall back to plain mlock() when the syscall is unavailable. */
525 #if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
526 if (syscall(SYS_mlock2, sh.arena, sh.arena_size, MLOCK_ONFAULT) < 0) {
527 if (errno == ENOSYS) {
528 if (mlock(sh.arena, sh.arena_size) < 0)
534 #elif defined(_WIN32)
535 if (VirtualLock(sh.arena, sh.arena_size) == FALSE)
538 if (mlock(sh.arena, sh.arena_size) < 0)
/* Keep secrets out of core dumps (MADV_DONTDUMP / MADV_NOCORE on BSD). */
542 if (madvise(sh.arena, sh.arena_size, MADV_DONTDUMP) < 0)
/*
 * Release everything sh_init() acquired: bookkeeping arrays, then the
 * arena mapping (munmap or VirtualFree depending on platform), and wipe
 * the descriptor so a subsequent sh_init() starts from a clean slate.
 */
553 static void sh_done(void)
555 OPENSSL_free(sh.freelist);
556 OPENSSL_free(sh.bittable);
557 OPENSSL_free(sh.bitmalloc);
559 if (sh.map_result != MAP_FAILED && sh.map_size)
560 munmap(sh.map_result, sh.map_size);
/* Windows branch of the same cleanup. */
562 if (sh.map_result != NULL && sh.map_size)
563 VirtualFree(sh.map_result, 0, MEM_RELEASE);
565 memset(&sh, 0, sizeof(sh));
/* Return 1 iff |ptr| points into the secure arena, 0 otherwise. */
568 static int sh_allocated(const char *ptr)
570 return WITHIN_ARENA(ptr) ? 1 : 0;
/*
 * Return the buddy of the chunk at |ptr| on level |list| if that buddy is
 * itself a free chunk (boundary bit set, malloc bit clear); NULL otherwise.
 */
573 static char *sh_find_my_buddy(char *ptr, int list)
578 bit = (ONE << list) + (ptr - sh.arena) / (sh.arena_size >> list);
/* Buddy must exist as a chunk and not be allocated to coalesce with it. */
581 if (TESTBIT(sh.bittable, bit) && !TESTBIT(sh.bitmalloc, bit))
582 chunk = sh.arena + ((bit & ((ONE << list) - 1)) * (sh.arena_size >> list));
/*
 * Buddy-allocator malloc: find the smallest level whose chunk size holds
 * |size|, splitting a larger free chunk down level by level if necessary,
 * then mark the chunk allocated and return it.  Caller holds the lock.
 */
587 static void *sh_malloc(size_t size)
589 ossl_ssize_t list, slist;
/* A request larger than the whole arena can never succeed. */
593 if (size > sh.arena_size)
/* Deepest level first; each doubling of i moves one level up. */
596 list = sh.freelist_size - 1;
597 for (i = sh.minsize; i < size; i <<= 1)
602 /* try to find a larger entry to split */
603 for (slist = list; slist >= 0; slist--)
604 if (sh.freelist[slist] != NULL)
609 /* split larger entry */
610 while (slist != list) {
611 char *temp = sh.freelist[slist];
613 /* remove from bigger list */
614 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
615 sh_clearbit(temp, slist, sh.bittable);
616 sh_remove_from_list(temp);
617 OPENSSL_assert(temp != sh.freelist[slist]);
619 /* done with bigger list */
622 /* add to smaller list */
623 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
624 sh_setbit(temp, slist, sh.bittable);
625 sh_add_to_list(&sh.freelist[slist], temp);
626 OPENSSL_assert(sh.freelist[slist] == temp);
/* Second half of the split becomes the buddy on the same level. */
629 temp += sh.arena_size >> slist;
630 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
631 sh_setbit(temp, slist, sh.bittable);
632 sh_add_to_list(&sh.freelist[slist], temp);
633 OPENSSL_assert(sh.freelist[slist] == temp);
635 OPENSSL_assert(temp-(sh.arena_size >> slist) == sh_find_my_buddy(temp, slist));
638 /* peel off memory to hand back */
639 chunk = sh.freelist[list];
640 OPENSSL_assert(sh_testbit(chunk, list, sh.bittable));
641 sh_setbit(chunk, list, sh.bitmalloc);
642 sh_remove_from_list(chunk);
644 OPENSSL_assert(WITHIN_ARENA(chunk));
646 /* zero the free-list header as a precaution against information leakage */
647 memset(chunk, 0, sizeof(SH_LIST));
/*
 * Buddy-allocator free: mark the chunk free, then repeatedly coalesce it
 * with its buddy (when the buddy is also free) up toward level 0.
 * Caller holds the lock; the data itself is cleansed by the caller.
 */
652 static void sh_free(void *ptr)
659 OPENSSL_assert(WITHIN_ARENA(ptr));
660 if (!WITHIN_ARENA(ptr))
/* Recover the chunk's level from the boundary bitmap. */
663 list = sh_getlist(ptr);
664 OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
665 sh_clearbit(ptr, list, sh.bitmalloc);
666 sh_add_to_list(&sh.freelist[list], ptr);
668 /* Try to coalesce two adjacent free areas. */
669 while ((buddy = sh_find_my_buddy(ptr, list)) != NULL) {
670 OPENSSL_assert(ptr == sh_find_my_buddy(buddy, list));
671 OPENSSL_assert(ptr != NULL);
672 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
/* Remove both halves from this level before re-inserting the merge. */
673 sh_clearbit(ptr, list, sh.bittable);
674 sh_remove_from_list(ptr);
675 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
676 sh_clearbit(buddy, list, sh.bittable);
677 sh_remove_from_list(buddy);
681 /* Zero the higher addressed block's free list pointers */
682 memset(ptr > buddy ? ptr : buddy, 0, sizeof(SH_LIST));
/* Re-insert the (possibly merged) chunk at the current level. */
686 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
687 sh_setbit(ptr, list, sh.bittable);
688 sh_add_to_list(&sh.freelist[list], ptr);
689 OPENSSL_assert(sh.freelist[list] == ptr);
/*
 * Return the real (power-of-two) size of the chunk at |ptr|:
 * arena_size / 2^level, where the level comes from the boundary bitmap.
 */
693 static size_t sh_actual_size(char *ptr)
697 OPENSSL_assert(WITHIN_ARENA(ptr));
698 if (!WITHIN_ARENA(ptr))
700 list = sh_getlist(ptr);
701 OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
702 return sh.arena_size / (ONE << list);
704 #endif /* OPENSSL_NO_SECURE_MEMORY */
704 #endif /* OPENSSL_NO_SECURE_MEMORY */