 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html

/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/cryptlib.h"
#include "internal/rcu.h"
#include "rcu_internal.h"
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
 * OS X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
 * rather than two, i.e. it has the signature __atomic_is_lock_free(sizeof(_Atomic(T))).
 * All of this makes it impossible to use __atomic_is_lock_free here.
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
# define BROKEN_CLANG_ATOMICS
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>

# ifdef PTHREAD_RWLOCK_INITIALIZER

 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
 * Unfortunately, we can't do that with some "generic type", because there's no
 * guarantee that the chosen generic type is large enough to cover all cases.
 * Therefore, we implement fallbacks for each applicable type, with composed
 * names that include the type they handle.
 * (an anecdote: we previously tried to use |void *| as the generic type, with
 * the thought that the pointer itself is the largest type. However, this is
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
 * they can map to the correct fallback function. In the GNU/clang case, that
 * parameter is simply ignored.
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
 * fallback function names.

typedef struct rcu_cb_item *prcu_cb_item;
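/*
 * For example, with the fallback implementations below in effect,
 *
 *     ATOMIC_LOAD_N(uint64_t, &lock->reader_idx, __ATOMIC_ACQUIRE)
 *
 * expands to
 *
 *     fallback_atomic_load_n_uint64_t(&lock->reader_idx)
 *
 * (the memory-order argument is dropped), whereas the GNU/clang branch maps
 * the same call directly onto __atomic_load_n(p, o).
 */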
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
    && !defined(USE_ATOMIC_FALLBACKS)
#  if defined(__APPLE__) && defined(__clang__) && defined(__aarch64__)
 * For pointers, the Apple M1 virtualized cpu seems to have some problem using the
 * ldapr instruction (see https://github.com/openssl/openssl/pull/23974).
 * When using the native Apple clang compiler, this instruction is emitted for
 * atomic loads, which is bad. So, if
 * 1) We are building on a target that defines __APPLE__ AND
 * 2) We are building on a target using clang (__clang__) AND
 * 3) We are building for an M1 processor (__aarch64__)
 * then we should not use __atomic_load_n and should instead implement our own
 * function to issue the ldar instruction, which produces the proper
 * sequencing guarantees
static inline void *apple_atomic_load_n_pvoid(void **p,
                                              ossl_unused int memorder)
    __asm volatile("ldar %0, [%1]" : "=r" (ret): "r" (p):);

/* For uint64_t, we should be fine, though */
#   define apple_atomic_load_n_uint64_t(p, o) __atomic_load_n(p, o)

#   define ATOMIC_LOAD_N(t, p, o) apple_atomic_load_n_##t(p, o)
#   define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_EXCHANGE_N(t, p, v, o) __atomic_exchange_n(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_FETCH_ADD(p, v, o) __atomic_fetch_add(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
#  define ATOMIC_AND_FETCH(p, m, o) __atomic_and_fetch(p, m, o)
#  define ATOMIC_OR_FETCH(p, m, o) __atomic_or_fetch(p, m, o)

static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
#  define IMPL_fallback_atomic_load_n(t) \
    static inline t fallback_atomic_load_n_##t(t *p) \
        pthread_mutex_lock(&atomic_sim_lock); \
        pthread_mutex_unlock(&atomic_sim_lock); \
IMPL_fallback_atomic_load_n(uint64_t)
IMPL_fallback_atomic_load_n(pvoid)

#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)

#  define IMPL_fallback_atomic_store_n(t) \
    static inline t fallback_atomic_store_n_##t(t *p, t v) \
        pthread_mutex_lock(&atomic_sim_lock); \
        pthread_mutex_unlock(&atomic_sim_lock); \
IMPL_fallback_atomic_store_n(uint64_t)

#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)

#  define IMPL_fallback_atomic_store(t) \
    static inline void fallback_atomic_store_##t(t *p, t *v) \
        pthread_mutex_lock(&atomic_sim_lock); \
        pthread_mutex_unlock(&atomic_sim_lock); \
IMPL_fallback_atomic_store(uint64_t)
IMPL_fallback_atomic_store(pvoid)

#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)

#  define IMPL_fallback_atomic_exchange_n(t) \
    static inline t fallback_atomic_exchange_n_##t(t *p, t v) \
        pthread_mutex_lock(&atomic_sim_lock); \
        pthread_mutex_unlock(&atomic_sim_lock); \
IMPL_fallback_atomic_exchange_n(uint64_t)
IMPL_fallback_atomic_exchange_n(prcu_cb_item)

#  define ATOMIC_EXCHANGE_N(t, p, v, o) fallback_atomic_exchange_n_##t(p, v)
 * The fallbacks that follow don't need any per type implementation, as
 * they are designed for uint64_t only. If there comes a time when multiple
 * types need to be covered, it's relatively easy to refactor them the same
 * way as the fallbacks above.

static inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
    pthread_mutex_lock(&atomic_sim_lock);
    pthread_mutex_unlock(&atomic_sim_lock);

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)

static inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
    pthread_mutex_lock(&atomic_sim_lock);
    pthread_mutex_unlock(&atomic_sim_lock);

#  define ATOMIC_FETCH_ADD(p, v, o) fallback_atomic_fetch_add(p, v)

static inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
    pthread_mutex_lock(&atomic_sim_lock);
    pthread_mutex_unlock(&atomic_sim_lock);

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)

static inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
    pthread_mutex_lock(&atomic_sim_lock);
    pthread_mutex_unlock(&atomic_sim_lock);

#  define ATOMIC_AND_FETCH(p, v, o) fallback_atomic_and_fetch(p, v)

static inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
    pthread_mutex_lock(&atomic_sim_lock);
    pthread_mutex_unlock(&atomic_sim_lock);

#  define ATOMIC_OR_FETCH(p, v, o) fallback_atomic_or_fetch(p, v)

static CRYPTO_THREAD_LOCAL rcu_thr_key;
 * users is broken up into 2 parts:
 * bits 0-15: current readers

# define READER_SHIFT 0
# define READER_SIZE 16

# define READER_MASK (((uint64_t)1 << READER_SIZE) - 1)
# define ID_MASK (((uint64_t)1 << ID_SIZE) - 1)
# define READER_COUNT(x) (((uint64_t)(x) >> READER_SHIFT) & READER_MASK)
# define ID_VAL(x) (((uint64_t)(x) >> ID_SHIFT) & ID_MASK)
# define VAL_READER ((uint64_t)1 << READER_SHIFT)
# define VAL_ID(x) ((uint64_t)(x) << ID_SHIFT)
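/*
 * Worked example (assuming the elided ID_SHIFT/ID_SIZE definitions place the
 * id field directly above the reader count, i.e. ID_SHIFT == READER_SIZE == 16):
 * a users value of 0x30002 decodes as READER_COUNT(0x30002) == 2 (two active
 * readers) and ID_VAL(0x30002) == 3 (the id of the quiescence point), and
 * VAL_ID(3) | (2 * VAL_READER) reconstructs the same value.
 */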
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. Users is the 64 bit value that
 * stores the READERS/ID as defined above

    CRYPTO_RCU_LOCK *lock;

 * This is the per thread tracking data
 * that is assigned to each thread participating
 * qp points to the qp that it last acquired

struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];

 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK

    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* rcu generation counter for in-order retirement */

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* Number of elements in qp_group array */

    /* Index of the current qp in the qp_group array */

    /* value of the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint64_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    pthread_mutex_t write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;

    /* lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;

    /* signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
 * Called on thread exit to free the pthread key
 * associated with this thread, if any
static void free_rcu_thr_data(void *ptr)
    struct rcu_thr_data *data =
        (struct rcu_thr_data *)CRYPTO_THREAD_get_local(&rcu_thr_key);

    CRYPTO_THREAD_set_local(&rcu_thr_key, NULL);

static void ossl_rcu_init(void)
    CRYPTO_THREAD_init_local(&rcu_thr_key, NULL);

/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
    /* get the current qp index */
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them above
         * this operation. Specifically, we don't want the below re-load of
         * qp_idx to get optimized away
         * 2) We want to ensure that any updating of reader_idx on the write side
         * of the lock is flushed from a local cpu cache so that we see any
         * updates prior to the load. This is a non-issue on cache coherent
         * systems like x86, but is relevant on other arches
         * Note: This applies to the reload below as well
        qp_idx = ATOMIC_LOAD_N(uint64_t, &lock->reader_idx, __ATOMIC_ACQUIRE);

         * Notes on use of __ATOMIC_RELEASE
         * This counter is only read by the write side of the lock, and so we
         * specify __ATOMIC_RELEASE here to ensure that the write side of the
         * lock sees this during the spin loop read of users, as it waits for the
         * reader count to approach zero
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == ATOMIC_LOAD_N(uint64_t, &lock->reader_idx, __ATOMIC_ACQUIRE))

         * Notes on use of __ATOMIC_RELEASE
         * As with the add above, we want to ensure that this decrement is
         * seen by the write side of the lock as soon as it happens to prevent
         * undue spinning waiting for write side completion
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,

    return &lock->qp_group[qp_idx];
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
    struct rcu_thr_data *data;
    int i, available_qp = -1;

     * we're going to access current_qp here so ask the
     * processor to fetch it
    data = CRYPTO_THREAD_get_local(&rcu_thr_key);

        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(&rcu_thr_key, data);
        ossl_init_thread_start(NULL, NULL, free_rcu_thr_data);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;

     * if we get here, then we don't have a hold on this lock yet
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(&rcu_thr_key);

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
             * As with read side acquisition, we use __ATOMIC_RELEASE here
             * to ensure that the decrement is published immediately
             * to any write side waiters
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users, VAL_READER,
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;

     * If we get here, we're trying to unlock a lock that we never acquired -
 * Write side allocation routine to get the current qp
 * and replace it with a new one
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
    uint64_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = lock->id_ctr;

    new_id = VAL_ID(new_id);
     * Even though we are under a write side lock here,
     * we need to use atomic instructions to ensure that the results
     * of this update are published to the read side prior to updating the
    ATOMIC_AND_FETCH(&lock->qp_group[current_idx].users, ID_MASK,
    ATOMIC_OR_FETCH(&lock->qp_group[current_idx].users, new_id,
     * Update the reader index to be the prior qp.
     * Note the use of __ATOMIC_RELEASE here is based on the corresponding use
     * of __ATOMIC_ACQUIRE in get_hold_current_qp, as we want any publication
     * of this value to be seen on the read side immediately after it happens
    ATOMIC_STORE_N(uint64_t, &lock->reader_idx, lock->current_alloc_idx,

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];

static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);

static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
    pthread_mutex_lock(&lock->write_lock);

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
    pthread_mutex_unlock(&lock->write_lock);

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
    struct rcu_cb_item *cb_items, *tmpcb;

     * __ATOMIC_ACQ_REL is used here to ensure that we get any prior published
     * writes before we read, and publish our write immediately
    cb_items = ATOMIC_EXCHANGE_N(prcu_cb_item, &lock->cb_items, NULL,

    qp = update_qp(lock);

     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in get_hold_current_qp
     * is visible prior to our read
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

     * Use __ATOMIC_ACQ_REL here to indicate that any prior writes to this
     * list are visible to us prior to reading, and publish the new value
    new->next = ATOMIC_EXCHANGE_N(prcu_cb_item, &lock->cb_items, new,

void *ossl_rcu_uptr_deref(void **p)
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);

void ossl_rcu_assign_uptr(void **p, void **v)
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);

static CRYPTO_ONCE rcu_init_once = CRYPTO_ONCE_STATIC_INIT;
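/*
 * Illustrative usage of the RCU primitives defined above (a hand-written
 * sketch for this excerpt; new_foo/old_foo are hypothetical application
 * pointers, not part of the library):
 *
 *     CRYPTO_RCU_LOCK *rcu = ossl_rcu_lock_new(1);
 *     void *shared = NULL;
 *
 *     Reader side (cheap, never blocks behind the writer):
 *         ossl_rcu_read_lock(rcu);
 *         struct foo *f = ossl_rcu_uptr_deref(&shared);
 *         ... read through f ...
 *         ossl_rcu_read_unlock(rcu);
 *
 *     Writer side (publish a new object, then wait out old readers):
 *         ossl_rcu_write_lock(rcu);
 *         ossl_rcu_assign_uptr(&shared, (void **)&new_foo);
 *         ossl_rcu_write_unlock(rcu);
 *         ossl_synchronize_rcu(rcu);
 *         OPENSSL_free(old_foo);
 *
 *     ossl_rcu_lock_free(rcu);
 *
 * Alternatively, the writer can defer the free with ossl_rcu_call() and let
 * the next ossl_synchronize_rcu() run the callback.
 */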
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers)
    struct rcu_lock_st *new;

    if (!CRYPTO_THREAD_run_once(&rcu_init_once, ossl_rcu_init))

    new = OPENSSL_zalloc(sizeof(*new));

    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);
    new->qp_group = allocate_new_qp_group(new, num_writers + 1);
    if (new->qp_group == NULL) {

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */

    if (pthread_rwlock_init(lock, NULL) != 0) {

    pthread_mutexattr_t attr;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */

     * We don't use recursive mutexes, but try to catch errors if we do.
    pthread_mutexattr_init(&attr);
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    /* The SPT Thread Library does not define MUTEX attributes. */

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);

    pthread_mutexattr_destroy(&attr);

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
    if (pthread_rwlock_rdlock(lock) != 0)
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
    if (pthread_rwlock_wrlock(lock) != 0)
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
    if (pthread_rwlock_unlock(lock) != 0)
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
    pthread_rwlock_destroy(lock);
    pthread_mutex_destroy(lock);

int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
    if (pthread_once(once, init) != 0)

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
    if (pthread_key_create(key, cleanup) != 0)

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
    return pthread_getspecific(*key);

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
    if (pthread_setspecific(*key, val) != 0)

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
    if (pthread_key_delete(*key) != 0)

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
    return pthread_self();

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
    return pthread_equal(a, b);

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);

    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
    if (!CRYPTO_THREAD_unlock(lock))

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
        *ret = atomic_or_64_nv(val, op);

    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
    if (!CRYPTO_THREAD_unlock(lock))

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
        *ret = atomic_or_64_nv(val, 0);

    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
    if (!CRYPTO_THREAD_unlock(lock))

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
    if (!CRYPTO_THREAD_unlock(lock))

int openssl_init_fork_handlers(void)

# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)