2 * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
3 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
5 * Licensed under the OpenSSL license (the "License"). You may not use
6 * this file except in compliance with the License. You can obtain a copy
7 * in the file LICENSE in the source distribution or at
8 * https://www.openssl.org/source/license.html
12 * This file is in two halves. The first half implements the public API
13 * to be used by external consumers, and to be used by OpenSSL to store
14 * data in a "secure arena." The second half implements the secure arena.
15 * For details on that implementation, see below (look for uppercase
16 * "SECURE HEAP IMPLEMENTATION").
19 #include <openssl/crypto.h>
23 /* e_os.h includes unistd.h, which defines _POSIX_VERSION */
24 #if !defined(OPENSSL_NO_SECURE_MEMORY) && defined(OPENSSL_SYS_UNIX) \
25 && ( (defined(_POSIX_VERSION) && _POSIX_VERSION >= 200112L) \
26 || defined(__sun) || defined(__hpux) || defined(__sgi) \
32 # include <sys/types.h>
33 # include <sys/mman.h>
34 # if defined(OPENSSL_SYS_LINUX)
35 # include <sys/syscall.h>
36 # if defined(SYS_mlock2)
37 # include <linux/mman.h>
41 # include <sys/param.h>
42 # include <sys/stat.h>
/* Wipe |s| bytes at |p|; OPENSSL_cleanse resists being optimized away. */
46 #define CLEAR(p, s) OPENSSL_cleanse(p, s)
/* Fallback page size for platforms that do not define PAGE_SIZE. */
48 # define PAGE_SIZE 4096
/* Some platforms spell the anonymous-mapping flag MAP_ANONYMOUS. */
50 #if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
51 # define MAP_ANON MAP_ANONYMOUS
/* Bytes currently handed out from the secure heap (updated under sec_malloc_lock). */
55 static size_t secure_mem_used;
/* Nonzero once CRYPTO_secure_malloc_init() has successfully set up the heap. */
57 static int secure_mem_initialized;
/* Lock serializing all access to the secure-heap (sh_*) allocator state. */
59 static CRYPTO_RWLOCK *sec_malloc_lock = NULL;
62  * These are the functions that must be implemented by a secure heap (sh).
64 static int sh_init(size_t size, int minsize);
65 static void *sh_malloc(size_t size);
66 static void sh_free(void *ptr);
67 static void sh_done(void);
68 static size_t sh_actual_size(char *ptr);
69 static int sh_allocated(const char *ptr);
/*
 * Create the secure heap: |size| bytes total, |minsize| minimum allocation
 * unit.  Idempotent with respect to an already-initialized heap (the body
 * only runs when !secure_mem_initialized).  On sh_init() failure the lock
 * is torn down again.
 *
 * NOTE(review): this extract is missing lines (the embedded line numbers
 * jump) — the declaration of |ret|, the early-return paths, and the final
 * return are not visible here.  Reconstruct from the upstream file before
 * changing logic.
 */
72 int CRYPTO_secure_malloc_init(size_t size, int minsize)
77     if (!secure_mem_initialized) {
78         sec_malloc_lock = CRYPTO_THREAD_lock_new();
79         if (sec_malloc_lock == NULL)
81         if ((ret = sh_init(size, minsize)) != 0) {
82             secure_mem_initialized = 1;
/* sh_init() failed: release the lock we just created so state stays clean. */
84             CRYPTO_THREAD_lock_free(sec_malloc_lock);
85             sec_malloc_lock = NULL;
92 #endif /* IMPLEMENTED */
/*
 * Tear the secure heap down.  Only proceeds when every secure allocation
 * has been freed (secure_mem_used == 0); the sh_done() call and return
 * statements are among the lines missing from this extract.
 */
95 int CRYPTO_secure_malloc_done(void)
98     if (secure_mem_used == 0) {
100         secure_mem_initialized = 0;
101         CRYPTO_THREAD_lock_free(sec_malloc_lock);
102         sec_malloc_lock = NULL;
105 #endif /* IMPLEMENTED */
/* Report whether the secure heap is currently available. */
109 int CRYPTO_secure_malloc_initialized(void)
112     return secure_mem_initialized;
115 #endif /* IMPLEMENTED */
/*
 * Allocate |num| bytes from the secure heap.  Falls back to the ordinary
 * CRYPTO_malloc() when the secure heap is not initialized (and, per the
 * trailing #endif, when the secure heap is not IMPLEMENTED at all).
 * Accounting of secure_mem_used happens under sec_malloc_lock.
 *
 * NOTE(review): declarations of |ret|/|actual_size| and the secure-path
 * return are among the lines missing from this extract.
 */
118 void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
124     if (!secure_mem_initialized) {
125         return CRYPTO_malloc(num, file, line);
127     CRYPTO_THREAD_write_lock(sec_malloc_lock);
128     ret = sh_malloc(num);
/* Track the rounded-up chunk size actually consumed, not the requested size. */
129     actual_size = ret ? sh_actual_size(ret) : 0;
130     secure_mem_used += actual_size;
131     CRYPTO_THREAD_unlock(sec_malloc_lock);
134     return CRYPTO_malloc(num, file, line);
135 #endif /* IMPLEMENTED */
/*
 * Zero-filled secure allocation.  Delegates to CRYPTO_secure_malloc() when
 * the heap is up (which, per the comment below, already zeroes), otherwise
 * to CRYPTO_zalloc().
 */
138 void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
141     if (secure_mem_initialized)
142         /* CRYPTO_secure_malloc() zeroes allocations when it is implemented */
143         return CRYPTO_secure_malloc(num, file, line);
145     return CRYPTO_zalloc(num, file, line);
/*
 * Free a pointer that may or may not live in the secure arena.  Pointers
 * outside the arena are handed to plain CRYPTO_free(); arena pointers are
 * cleansed for their full chunk size, deducted from the usage counter, and
 * returned to the secure heap (the sh_free() call and early return are
 * among the lines missing from this extract).
 */
148 void CRYPTO_secure_free(void *ptr, const char *file, int line)
155     if (!CRYPTO_secure_allocated(ptr)) {
156         CRYPTO_free(ptr, file, line);
159     CRYPTO_THREAD_write_lock(sec_malloc_lock);
160     actual_size = sh_actual_size(ptr);
/* Scrub the whole chunk, not just the caller-visible size. */
161     CLEAR(ptr, actual_size);
162     secure_mem_used -= actual_size;
164     CRYPTO_THREAD_unlock(sec_malloc_lock);
166     CRYPTO_free(ptr, file, line);
167 #endif /* IMPLEMENTED */
/*
 * Like CRYPTO_secure_free(), but guarantees |num| bytes are cleansed even
 * for pointers that never came from the secure arena.
 */
170 void CRYPTO_secure_clear_free(void *ptr, size_t num,
171                               const char *file, int line)
178     if (!CRYPTO_secure_allocated(ptr)) {
/* Non-arena pointer: still honor the caller's request to scrub |num| bytes. */
179         OPENSSL_cleanse(ptr, num);
180         CRYPTO_free(ptr, file, line);
183     CRYPTO_THREAD_write_lock(sec_malloc_lock);
184     actual_size = sh_actual_size(ptr);
/* Arena pointer: scrub the full chunk size (>= num). */
185     CLEAR(ptr, actual_size);
186     secure_mem_used -= actual_size;
188     CRYPTO_THREAD_unlock(sec_malloc_lock);
192     OPENSSL_cleanse(ptr, num);
193     CRYPTO_free(ptr, file, line);
194 #endif /* IMPLEMENTED */
/*
 * Return nonzero iff |ptr| lies inside the secure arena.  Always false
 * when the heap is not initialized (the early return's value is among the
 * lines missing from this extract).
 */
197 int CRYPTO_secure_allocated(const void *ptr)
202     if (!secure_mem_initialized)
204     CRYPTO_THREAD_write_lock(sec_malloc_lock);
205     ret = sh_allocated(ptr);
206     CRYPTO_THREAD_unlock(sec_malloc_lock);
210 #endif /* IMPLEMENTED */
/* Bytes currently allocated from the secure heap (unlocked snapshot read). */
213 size_t CRYPTO_secure_used(void)
216     return secure_mem_used;
219 #endif /* IMPLEMENTED */
/*
 * Size of the secure-heap chunk backing |ptr| (may exceed the requested
 * allocation size).  The declaration of |actual_size| and the return are
 * among the lines missing from this extract.
 */
222 size_t CRYPTO_secure_actual_size(void *ptr)
227     CRYPTO_THREAD_write_lock(sec_malloc_lock);
228     actual_size = sh_actual_size(ptr);
229     CRYPTO_THREAD_unlock(sec_malloc_lock);
240  * SECURE HEAP IMPLEMENTATION
246  * The implementation provided here uses a fixed-sized mmap() heap,
247  * which is locked into memory, not written to core files, and protected
248  * on either side by an unmapped page, which will catch pointer overruns
249  * (or underruns) and an attempt to read data out of the secure heap.
250  * Free'd memory is zero'd or otherwise cleansed.
252  * This is a pretty standard buddy allocator. We keep areas in a multiple
253  * of "sh.minsize" units. The freelist and bitmaps are kept separately,
254  * so all (and only) data is kept in the mmap'd heap.
256  * This code assumes eight-bit bytes. The numbers 3 and 7 are all over the
/* size_t 1, so shifts below are done at full width. */
260 #define ONE ((size_t)1)
/* Bit |b| of byte table |t|: byte index b>>3, bit index b&7. */
262 # define TESTBIT(t, b)  (t[(b) >> 3] & (ONE << ((b) & 7)))
263 # define SETBIT(t, b)   (t[(b) >> 3] |= (ONE << ((b) & 7)))
264 # define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))
/* Does |p| point into the mmap'd data arena? */
266 #define WITHIN_ARENA(p) \
267     ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
/* Does |p| point into the freelist-head array (not the arena)? */
268 #define WITHIN_FREELIST(p) \
269     ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])
/* Doubly-linked free-list node, stored inside each free arena chunk. */
272 typedef struct sh_list_st
274     struct sh_list_st *next;
275     struct sh_list_st **p_next;
/* Fields of the allocator state struct (its opening lines are missing
 * from this extract): one freelist head per buddy level, plus two bit
 * trees — bittable marks chunk starts, bitmalloc marks allocated chunks. */
285     ossl_ssize_t freelist_size;
287     unsigned char *bittable;
288     unsigned char *bitmalloc;
289     size_t bittable_size;       /* size in bits */
/*
 * Map an arena pointer to the buddy-tree level (freelist index) of the
 * chunk containing it: start from the leaf bit for |ptr| and walk up
 * (bit >>= 1, list--) until a bit set in sh.bittable marks the chunk's
 * start.  The loop-exit break and return are among the lines missing
 * from this extract.
 */
294 static size_t sh_getlist(char *ptr)
296     ossl_ssize_t list = sh.freelist_size - 1;
/* Leaf bit index: offset in minsize units, biased by arena_size/minsize. */
297     size_t bit = (sh.arena_size + ptr - sh.arena) / sh.minsize;
299     for (; bit; bit >>= 1, list--) {
300         if (TESTBIT(sh.bittable, bit))
/* A right-hand buddy (odd bit) can never be a chunk start here. */
302         OPENSSL_assert((bit & 1) == 0);
/*
 * Test the bit for the chunk at |ptr| on buddy level |list| in |table|
 * (sh.bittable or sh.bitmalloc).  Asserts |ptr| is aligned to the chunk
 * size of that level.  The declaration of |bit| is missing from this
 * extract.
 */
309 static int sh_testbit(char *ptr, int list, unsigned char *table)
313     OPENSSL_assert(list >= 0 && list < sh.freelist_size);
314     OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
/* Level-|list| bits start at index 1<<list; add the chunk's ordinal. */
315     bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
316     OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
317     return TESTBIT(table, bit);
/*
 * Clear the bit for the chunk at |ptr| on level |list| in |table|;
 * asserts the bit was set.  The declaration of |bit| is missing from
 * this extract.
 */
320 static void sh_clearbit(char *ptr, int list, unsigned char *table)
324     OPENSSL_assert(list >= 0 && list < sh.freelist_size);
325     OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
326     bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
327     OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
328     OPENSSL_assert(TESTBIT(table, bit));
329     CLEARBIT(table, bit);
/*
 * Set the bit for the chunk at |ptr| on level |list| in |table|; asserts
 * the bit was previously clear.
 *
 * NOTE(review): the actual SETBIT(table, bit) call does not appear in
 * this extract — only the asserts are visible.  It is presumably on one
 * of the missing lines; confirm against the upstream file.
 */
332 static void sh_setbit(char *ptr, int list, unsigned char *table)
336     OPENSSL_assert(list >= 0 && list < sh.freelist_size);
337     OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
338     bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
339     OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
340     OPENSSL_assert(!TESTBIT(table, bit));
/*
 * Push the free chunk at |ptr| onto the head of the doubly-linked
 * freelist whose head pointer is |list|.  The SH_LIST node lives inside
 * the chunk itself; p_next points back at whatever pointer references
 * this node.  The final *list = ptr store is among the lines missing
 * from this extract.
 */
344 static void sh_add_to_list(char **list, char *ptr)
348     OPENSSL_assert(WITHIN_FREELIST(list));
349     OPENSSL_assert(WITHIN_ARENA(ptr));
351     temp = (SH_LIST *)ptr;
352     temp->next = *(SH_LIST **)list;
353     OPENSSL_assert(temp->next == NULL || WITHIN_ARENA(temp->next));
354     temp->p_next = (SH_LIST **)list;
356     if (temp->next != NULL) {
357         OPENSSL_assert((char **)temp->next->p_next == list);
/* Old head's back-pointer now refers to the new node's next field. */
358         temp->next->p_next = &(temp->next);
/*
 * Unlink the free chunk at |ptr| from its doubly-linked freelist using
 * the back-pointer stored in the chunk.  The assignment of |temp2| used
 * by the final assert is among the lines missing from this extract.
 */
364 static void sh_remove_from_list(char *ptr)
366     SH_LIST *temp, *temp2;
368     temp = (SH_LIST *)ptr;
369     if (temp->next != NULL)
370         temp->next->p_next = temp->p_next;
/* Redirect whatever pointed at us to our successor. */
371     *temp->p_next = temp->next;
372     if (temp->next == NULL)
376     OPENSSL_assert(WITHIN_FREELIST(temp2->p_next) || WITHIN_ARENA(temp2->p_next));
/*
 * Build the secure heap: validate |size| and |minsize| (both must be
 * powers of two), size the freelist and the two bit tables, mmap the
 * arena flanked by two guard pages, mprotect the guards to PROT_NONE,
 * mlock the arena (mlock2 with MLOCK_ONFAULT on Linux when available),
 * and exclude it from core dumps via madvise(MADV_DONTDUMP).
 *
 * NOTE(review): this extract is missing many lines — local declarations
 * (i, pgsize, fd, aligned), the error-cleanup labels/returns, and the
 * success return are not visible.  Confirm against the upstream file
 * before changing logic.
 */
380 static int sh_init(size_t size, int minsize)
387     memset(&sh, 0, sizeof(sh));
389     /* make sure size and minsize are powers of 2 */
390     OPENSSL_assert(size > 0);
391     OPENSSL_assert((size & (size - 1)) == 0);
392     OPENSSL_assert(minsize > 0);
393     OPENSSL_assert((minsize & (minsize - 1)) == 0);
394     if (size <= 0 || (size & (size - 1)) != 0)
396     if (minsize <= 0 || (minsize & (minsize - 1)) != 0)
/* A free chunk must be able to hold an SH_LIST node. */
399     while (minsize < (int)sizeof(SH_LIST))
402     sh.arena_size = size;
403     sh.minsize = minsize;
/* One bit per node of the buddy tree: 2 * number of minsize leaves. */
404     sh.bittable_size = (sh.arena_size / sh.minsize) * 2;
406     /* Prevent allocations of size 0 later on */
407     if (sh.bittable_size >> 3 == 0)
/* freelist_size = log2(bittable_size): one freelist per buddy level. */
410     sh.freelist_size = -1;
411     for (i = sh.bittable_size; i; i >>= 1)
414     sh.freelist = OPENSSL_zalloc(sh.freelist_size * sizeof(char *));
415     OPENSSL_assert(sh.freelist != NULL);
416     if (sh.freelist == NULL)
419     sh.bittable = OPENSSL_zalloc(sh.bittable_size >> 3);
420     OPENSSL_assert(sh.bittable != NULL);
421     if (sh.bittable == NULL)
424     sh.bitmalloc = OPENSSL_zalloc(sh.bittable_size >> 3);
425     OPENSSL_assert(sh.bitmalloc != NULL);
426     if (sh.bitmalloc == NULL)
429     /* Allocate space for heap, and two extra pages as guards */
430 #if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
432 # if defined(_SC_PAGE_SIZE)
433         long tmppgsize = sysconf(_SC_PAGE_SIZE);
435         long tmppgsize = sysconf(_SC_PAGESIZE);
440             pgsize = (size_t)tmppgsize;
/* Guard page + arena + guard page. */
445     sh.map_size = pgsize + sh.arena_size + pgsize;
448         sh.map_result = mmap(NULL, sh.map_size,
449                              PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
/* Fallback for platforms without MAP_ANON: map /dev/zero instead. */
454         sh.map_result = MAP_FAILED;
455         if ((fd = open("/dev/zero", O_RDWR)) >= 0) {
456             sh.map_result = mmap(NULL, sh.map_size,
457                                  PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
461     if (sh.map_result == MAP_FAILED)
463     sh.arena = (char *)(sh.map_result + pgsize);
/* The whole arena starts as one free level-0 chunk. */
464     sh_setbit(sh.arena, 0, sh.bittable);
465     sh_add_to_list(&sh.freelist[0], sh.arena);
467     /* Now try to add guard pages and lock into memory. */
470     /* Starting guard is already aligned from mmap. */
471     if (mprotect(sh.map_result, pgsize, PROT_NONE) < 0)
474     /* Ending guard page - need to round up to page boundary */
475     aligned = (pgsize + sh.arena_size + (pgsize - 1)) & ~(pgsize - 1);
476     if (mprotect(sh.map_result + aligned, pgsize, PROT_NONE) < 0)
479 #if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
/* Prefer mlock2(MLOCK_ONFAULT); fall back to mlock() on ENOSYS. */
480     if (syscall(SYS_mlock2, sh.arena, sh.arena_size, MLOCK_ONFAULT) < 0) {
481         if (errno == ENOSYS) {
482             if (mlock(sh.arena, sh.arena_size) < 0)
489     if (mlock(sh.arena, sh.arena_size) < 0)
/* Keep secrets out of core dumps. */
493     if (madvise(sh.arena, sh.arena_size, MADV_DONTDUMP) < 0)
/*
 * Release all secure-heap bookkeeping, unmap the arena (guards included),
 * and zero the state struct so a later sh_init() starts clean.
 */
504 static void sh_done(void)
506     OPENSSL_free(sh.freelist);
507     OPENSSL_free(sh.bittable);
508     OPENSSL_free(sh.bitmalloc);
509     if (sh.map_result != NULL && sh.map_size)
510         munmap(sh.map_result, sh.map_size);
511     memset(&sh, 0, sizeof(sh));
/* Nonzero iff |ptr| lies within the mmap'd secure arena. */
514 static int sh_allocated(const char *ptr)
516     return WITHIN_ARENA(ptr) ? 1 : 0;
/*
 * Return a pointer to the buddy of the level-|list| chunk at |ptr| iff
 * that buddy is itself a free chunk (start bit set, malloc bit clear),
 * else NULL.
 *
 * NOTE(review): the operation that flips |bit| to the buddy's index
 * (and the declarations of bit/chunk and the return) are on lines
 * missing from this extract — as written here, |bit| still indexes the
 * chunk itself.  Confirm against the upstream file.
 */
519 static char *sh_find_my_buddy(char *ptr, int list)
524     bit = (ONE << list) + (ptr - sh.arena) / (sh.arena_size >> list);
527     if (TESTBIT(sh.bittable, bit) && !TESTBIT(sh.bitmalloc, bit))
/* Buddy's address: its ordinal within the level times the level chunk size. */
528         chunk = sh.arena + ((bit & ((ONE << list) - 1)) * (sh.arena_size >> list));
/*
 * Buddy-allocate |size| bytes from the secure arena: find the smallest
 * level whose chunk size fits |size|, split a larger free chunk down to
 * that level if necessary, mark the resulting chunk allocated, and zero
 * its embedded freelist header before handing it out.
 *
 * NOTE(review): several lines are missing from this extract — the
 * declarations of i/chunk, the failure returns (size too large, no free
 * chunk found), the slist bookkeeping between split steps, and the final
 * return of |chunk|.  Confirm against the upstream file before changing.
 */
533 static void *sh_malloc(size_t size)
535     ossl_ssize_t list, slist;
539     if (size > sh.arena_size)
/* Smallest level whose chunk size (minsize << k) holds |size|. */
542     list = sh.freelist_size - 1;
543     for (i = sh.minsize; i < size; i <<= 1)
548     /* try to find a larger entry to split */
549     for (slist = list; slist >= 0; slist--)
550         if (sh.freelist[slist] != NULL)
555     /* split larger entry */
556     while (slist != list) {
557         char *temp = sh.freelist[slist];
559         /* remove from bigger list */
560         OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
561         sh_clearbit(temp, slist, sh.bittable);
562         sh_remove_from_list(temp);
563         OPENSSL_assert(temp != sh.freelist[slist]);
565         /* done with bigger list */
568         /* add to smaller list */
569         OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
570         sh_setbit(temp, slist, sh.bittable);
571         sh_add_to_list(&sh.freelist[slist], temp);
572         OPENSSL_assert(sh.freelist[slist] == temp);
/* Second half of the split becomes the buddy on the same level. */
575         temp += sh.arena_size >> slist;
576         OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
577         sh_setbit(temp, slist, sh.bittable);
578         sh_add_to_list(&sh.freelist[slist], temp);
579         OPENSSL_assert(sh.freelist[slist] == temp);
581         OPENSSL_assert(temp-(sh.arena_size >> slist) == sh_find_my_buddy(temp, slist));
584     /* peel off memory to hand back */
585     chunk = sh.freelist[list];
586     OPENSSL_assert(sh_testbit(chunk, list, sh.bittable));
587     sh_setbit(chunk, list, sh.bitmalloc);
588     sh_remove_from_list(chunk);
590     OPENSSL_assert(WITHIN_ARENA(chunk));
592     /* zero the free list header as a precaution against information leakage */
593     memset(chunk, 0, sizeof(SH_LIST));
/*
 * Return the chunk at |ptr| to the secure heap: clear its allocated bit,
 * put it back on its level's freelist, then repeatedly coalesce with its
 * free buddy, moving up one level per merge (re-setting the merged
 * chunk's bit and freelist entry at the bottom of the loop).
 *
 * NOTE(review): missing lines in this extract include the declarations
 * of list/buddy, the early returns, and the in-loop updates that pick
 * the lower-addressed pointer and decrement |list| between iterations.
 * Confirm against the upstream file before changing.
 */
598 static void sh_free(void *ptr)
605     OPENSSL_assert(WITHIN_ARENA(ptr));
606     if (!WITHIN_ARENA(ptr))
609     list = sh_getlist(ptr);
610     OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
611     sh_clearbit(ptr, list, sh.bitmalloc);
612     sh_add_to_list(&sh.freelist[list], ptr);
614     /* Try to coalesce two adjacent free areas. */
615     while ((buddy = sh_find_my_buddy(ptr, list)) != NULL) {
616         OPENSSL_assert(ptr == sh_find_my_buddy(buddy, list));
617         OPENSSL_assert(ptr != NULL);
618         OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
/* Pull both halves off the current level before merging. */
619         sh_clearbit(ptr, list, sh.bittable);
620         sh_remove_from_list(ptr);
621         OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
622         sh_clearbit(buddy, list, sh.bittable);
623         sh_remove_from_list(buddy);
627         /* Zero the higher addressed block's free list pointers */
628         memset(ptr > buddy ? ptr : buddy, 0, sizeof(SH_LIST));
632         OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
633         sh_setbit(ptr, list, sh.bittable);
634         sh_add_to_list(&sh.freelist[list], ptr);
635         OPENSSL_assert(sh.freelist[list] == ptr);
/*
 * Size in bytes of the chunk containing |ptr|: find its buddy level and
 * return arena_size scaled down by that level.  The declaration of
 * |list| and the early return for non-arena pointers are among the
 * lines missing from this extract.
 */
639 static size_t sh_actual_size(char *ptr)
643     OPENSSL_assert(WITHIN_ARENA(ptr));
644     if (!WITHIN_ARENA(ptr))
646     list = sh_getlist(ptr);
647     OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
648     return sh.arena_size / (ONE << list);
650 #endif /* IMPLEMENTED */