/*
 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED
#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/cryptlib.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(__sun)
# include <atomic.h>
#endif
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
 * rather than two, which has the signature __atomic_is_lock_free(sizeof(_Atomic(T))).
 * All of this makes it impossible to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
# define BROKEN_CLANG_ATOMICS
#endif
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

# include <assert.h>
# include <errno.h>

# ifdef PTHREAD_RWLOCK_INITIALIZER
#  define USE_RWLOCK
# endif
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
#  if defined(__APPLE__) && defined(__clang__) && defined(__aarch64__)
/*
 * Apple M1 virtualized cpu seems to have some problem using the ldapr instruction
 * (see https://github.com/openssl/openssl/pull/23974)
 * When using the native apple clang compiler, this instruction is emitted for
 * atomic loads, which is bad. So, if
 * 1) We are building on a target that defines __APPLE__ AND
 * 2) We are building on a target using clang (__clang__) AND
 * 3) We are building for an M1 processor (__aarch64__)
 * Then we should not use __atomic_load_n and instead implement our own
 * function to issue the ldar instruction instead, which produces the proper
 * sequencing guarantees
 */
static inline void *apple_atomic_load_n(void **p)
{
    void *ret;

    __asm volatile("ldar %0, [%1]" : "=r" (ret): "r" (p):);

    return ret;
}

#   define ATOMIC_LOAD_N(p, o) apple_atomic_load_n((void **)p)
#  else
#   define ATOMIC_LOAD_N(p, o) __atomic_load_n(p, o)
#  endif
#  define ATOMIC_STORE_N(p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_EXCHANGE_N(p, v, o) __atomic_exchange_n(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_FETCH_ADD(p, v, o) __atomic_fetch_add(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
#  define ATOMIC_AND_FETCH(p, m, o) __atomic_and_fetch(p, m, o)
#  define ATOMIC_OR_FETCH(p, m, o) __atomic_or_fetch(p, m, o)
# else
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
static inline void *fallback_atomic_load_n(void **p)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_LOAD_N(p, o) fallback_atomic_load_n((void **)p)
static inline void *fallback_atomic_store_n(void **p, void *v)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p = v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_STORE_N(p, v, o) fallback_atomic_store_n((void **)p, (void *)v)
static inline void fallback_atomic_store(void **p, void **v)
{
    pthread_mutex_lock(&atomic_sim_lock);
    *p = *v;
    pthread_mutex_unlock(&atomic_sim_lock);
}

#  define ATOMIC_STORE(p, v, o) fallback_atomic_store((void **)p, (void **)v)
static inline void *fallback_atomic_exchange_n(void **p, void *v)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p = v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_EXCHANGE_N(p, v, o) fallback_atomic_exchange_n((void **)p, (void *)v)
static inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p += v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
static inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p += v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_FETCH_ADD(p, v, o) fallback_atomic_fetch_add(p, v)
static inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p -= v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
static inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p &= m;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_AND_FETCH(p, v, o) fallback_atomic_and_fetch(p, v)
static inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p |= m;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_OR_FETCH(p, v, o) fallback_atomic_or_fetch(p, v)
# endif
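
/*
 * Note: the fallback_* helpers above emulate the __atomic builtins with a
 * single global mutex, so the memory-order argument accepted by the
 * ATOMIC_* macros is simply ignored on this path; the mutex acquire and
 * release already provide full ordering.
 */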
static CRYPTO_THREAD_LOCAL rcu_thr_key;
/*
 * users is broken up into 2 parts
 * bits 0-15 current readers
 * bit 32-63 ID
 */
# define READER_SHIFT 0
# define ID_SHIFT 32
# define READER_SIZE 16
# define ID_SIZE 32

# define READER_MASK (((uint64_t)1 << READER_SIZE) - 1)
# define ID_MASK (((uint64_t)1 << ID_SIZE) - 1)
# define READER_COUNT(x) (((uint64_t)(x) >> READER_SHIFT) & READER_MASK)
# define ID_VAL(x) (((uint64_t)(x) >> ID_SHIFT) & ID_MASK)
# define VAL_READER ((uint64_t)1 << READER_SHIFT)
# define VAL_ID(x) ((uint64_t)x << ID_SHIFT)
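
/*
 * Worked example of the encoding above (illustrative only): a users value
 * for generation 5 with 3 active readers can be built and decoded as:
 *
 *     uint64_t users = VAL_ID(5) | (3 * VAL_READER);  // 0x0000000500000003
 *
 *     assert(ID_VAL(users) == 5);        // bits 32-63: generation id
 *     assert(READER_COUNT(users) == 3);  // bits 0-15:  reader count
 */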
/*
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. Users is the 64 bit value that
 * stores the READERS/ID as defined above
 */
struct rcu_qp {
    uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};
# define MAX_QPS 10
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};
/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* Number of elements in qp_group array */
    size_t group_count;

    /* Index of the current qp in the qp_group array */
    uint64_t reader_idx;

    /* value of the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint64_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    pthread_mutex_t write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;

    /* lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;

    /* signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
};
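
/*
 * Usage sketch for the API implemented below (illustrative only; rcu,
 * struct foo, shared, newp and use() are hypothetical names):
 *
 *     // reader
 *     ossl_rcu_read_lock(rcu);
 *     struct foo *p = ossl_rcu_uptr_deref((void **)&shared);
 *     use(p);
 *     ossl_rcu_read_unlock(rcu);
 *
 *     // writer
 *     ossl_rcu_write_lock(rcu);
 *     struct foo *old = shared;
 *     ossl_rcu_assign_uptr((void **)&shared, (void **)&newp);
 *     ossl_rcu_write_unlock(rcu);
 *     ossl_synchronize_rcu(rcu);  // wait for readers of old to drain
 *     OPENSSL_free(old);
 */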
/*
 * Called on thread exit to free the pthread key
 * associated with this thread, if any
 */
static void free_rcu_thr_data(void *ptr)
{
    struct rcu_thr_data *data =
            (struct rcu_thr_data *)CRYPTO_THREAD_get_local(&rcu_thr_key);

    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(&rcu_thr_key, NULL);
}
static void ossl_rcu_init(void)
{
    CRYPTO_THREAD_init_local(&rcu_thr_key, NULL);
}
/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
{
    uint64_t qp_idx;

    /* get the current qp index */
    for (;;) {
        /*
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them above
         * this operation. Specifically, we don't want the below re-load of
         * qp_idx to get optimized away
         * 2) We want to ensure that any updating of reader_idx on the write side
         * of the lock is flushed from a local cpu cache so that we see any
         * updates prior to the load. This is a non-issue on cache coherent
         * systems like x86, but is relevant on other arches
         * Note: This applies to the reload below as well
         */
        qp_idx = (uint64_t)ATOMIC_LOAD_N(&lock->reader_idx, __ATOMIC_ACQUIRE);

        /*
         * Notes on use of __ATOMIC_RELEASE
         * This counter is only read by the write side of the lock, and so we
         * specify __ATOMIC_RELEASE here to ensure that the write side of the
         * lock sees this during the spin loop read of users, as it waits for
         * the reader count to approach zero
         */
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
                         __ATOMIC_RELEASE);

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == (uint64_t)ATOMIC_LOAD_N(&lock->reader_idx, __ATOMIC_ACQUIRE))
            break;

        /*
         * Notes on use of __ATOMIC_RELEASE
         * As with the add above, we want to ensure that this decrement is
         * seen by the write side of the lock as soon as it happens to prevent
         * undue spinning waiting for write side completion
         */
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
                         __ATOMIC_RELEASE);
    }

    return &lock->qp_group[qp_idx];
}
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i, available_qp = -1;

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local(&rcu_thr_key);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(&rcu_thr_key, data);
        ossl_init_thread_start(NULL, NULL, free_rcu_thr_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}
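
/*
 * Note that read side acquisition is recursive: a nested
 * ossl_rcu_read_lock() on the same lock from the same thread only bumps
 * depth above, so the qp is held until the matching number of
 * ossl_rcu_read_unlock() calls has been made.
 */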
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    int i;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(&rcu_thr_key);
    uint64_t ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            /*
             * As with read side acquisition, we use __ATOMIC_RELEASE here
             * to ensure that the decrement is published immediately
             * to any write side waiters
             */
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
                                       VAL_READER, __ATOMIC_RELEASE);
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
    /*
     * If we get here, we're trying to unlock a lock that we never acquired -
     * that's fatal
     */
    assert(0);
}
/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
{
    uint64_t new_id;
    uint64_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = lock->id_ctr;
    lock->id_ctr++;

    new_id = VAL_ID(new_id);
    /*
     * Even though we are under a write side lock here
     * we need to use atomic instructions to ensure that the results
     * of this update are published to the read side prior to updating the
     * reader idx below
     */
    ATOMIC_AND_FETCH(&lock->qp_group[current_idx].users, ID_MASK,
                     __ATOMIC_RELEASE);
    ATOMIC_OR_FETCH(&lock->qp_group[current_idx].users, new_id,
                    __ATOMIC_RELEASE);

    /*
     * Update the reader index to be the prior qp.
     * Note the use of __ATOMIC_RELEASE here is based on the corresponding use
     * of __ATOMIC_ACQUIRE in get_hold_current_qp, as we want any publication
     * of this value to be seen on the read side immediately after it happens
     */
    ATOMIC_STORE_N(&lock->reader_idx, lock->current_alloc_idx,
                   __ATOMIC_RELEASE);

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];
}
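
/*
 * Worked example of the id swap above (illustrative only): if the outgoing
 * qp has users == 0x0000000400000002 (id 4, two lingering readers) and the
 * new generation id is 5, the two atomics yield:
 *
 *     users &= ID_MASK;    // 0x0000000000000002, id cleared, readers kept
 *     users |= VAL_ID(5);  // 0x0000000500000002, new id, readers intact
 */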
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
}
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
                                            int count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_lock(&lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_unlock(&lock->write_lock);
}
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    struct rcu_cb_item *cb_items, *tmpcb;

    /*
     * __ATOMIC_ACQ_REL is used here to ensure that we get any prior published
     * writes before we read, and publish our write immediately
     */
    cb_items = ATOMIC_EXCHANGE_N(&lock->cb_items, NULL, __ATOMIC_ACQ_REL);

    qp = update_qp(lock);

    /*
     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in get_hold_current_qp
     * is visible prior to our read
     */
    do {
        count = (uint64_t)ATOMIC_LOAD_N(&qp->users, __ATOMIC_ACQUIRE);
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}
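
/*
 * For example, if writers A and B allocate generations 3 and 4 and B's
 * readers happen to drain first, B still blocks on prior_signal until A
 * has bumped next_to_retire from 3 to 4, so qps always retire in
 * generation order.
 */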
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;
    /*
     * Use __ATOMIC_ACQ_REL here to indicate that any prior writes to this
     * list are visible to us prior to reading, and publish the new value
     * immediately
     */
    new->next = ATOMIC_EXCHANGE_N(&lock->cb_items, new, __ATOMIC_ACQ_REL);
    return 1;
}
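
/*
 * Illustrative use of ossl_rcu_call() (hypothetical names): a writer can
 * queue the old data for deferred release instead of freeing it inline;
 * free_foo() then runs at the end of the next grace period:
 *
 *     ossl_rcu_assign_uptr((void **)&shared, (void **)&newp);
 *     ossl_rcu_call(rcu, free_foo, old);
 *     ossl_synchronize_rcu(rcu);  // callbacks queued above fire here
 */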
void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)ATOMIC_LOAD_N(p, __ATOMIC_ACQUIRE);
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    ATOMIC_STORE(p, v, __ATOMIC_RELEASE);
}
static CRYPTO_ONCE rcu_init_once = CRYPTO_ONCE_STATIC_INIT;
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers)
{
    struct rcu_lock_st *new;

    if (!CRYPTO_THREAD_run_once(&rcu_init_once, ossl_rcu_init))
        return NULL;

    if (num_writers < 1)
        num_writers = 1;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);
    new->qp_group = allocate_new_qp_group(new, num_writers + 1);
    if (new->qp_group == NULL) {
        OPENSSL_free(new);
        new = NULL;
    }
    return new;
}
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    if (lock == NULL)
        return;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */
    OPENSSL_free(rlock);
}
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}
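
/*
 * Illustrative use of the portable lock API implemented here (hypothetical
 * variable names):
 *
 *     CRYPTO_RWLOCK *lk = CRYPTO_THREAD_lock_new();
 *
 *     if (lk != NULL && CRYPTO_THREAD_write_lock(lk)) {
 *         // ... mutate shared state ...
 *         CRYPTO_THREAD_unlock(lk);
 *     }
 *     CRYPTO_THREAD_lock_free(lk);
 */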
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_rdlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_wrlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);

    return;
}
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret  = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
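
/*
 * Sketch of a typical caller (hypothetical obj): a lock-free reference
 * count bump that falls back to the rwlock when atomics are unavailable:
 *
 *     int refs;
 *
 *     if (!CRYPTO_atomic_add(&obj->references, 1, &refs, obj->lock))
 *         return 0;
 */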
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret  = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 1;
}
# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)
{
    return getpid();
}
#endif