+/* Forward declarations for the callbacks wired into rand_ssleay_meth. */
+static int ssleay_rand_seed(const void *buf, int num);
+static int ssleay_rand_add(const void *buf, int num, double add_entropy);
+static int ssleay_rand_bytes(unsigned char *buf, int num, int pseudo);
+static int ssleay_rand_nopseudo_bytes(unsigned char *buf, int num);
+#ifndef OPENSSL_NO_DEPRECATED
+static int ssleay_rand_pseudo_bytes(unsigned char *buf, int num);
+#endif
+static int ssleay_rand_status(void);
+/*
+ * ssleay_rand_cleanup is referenced in the method table below, before its
+ * definition; declare it here.  (Harmless if it is also declared earlier
+ * in the file -- redundant declarations are legal in C.)
+ */
+static void ssleay_rand_cleanup(void);
+
+/*
+ * Method table for the built-in software PRNG.  The positional initializer
+ * follows the RAND_METHOD field order: seed, bytes, cleanup, add,
+ * pseudorand, status -- do not reorder these entries.
+ */
+static RAND_METHOD rand_ssleay_meth = {
+ ssleay_rand_seed,
+ ssleay_rand_nopseudo_bytes,
+ ssleay_rand_cleanup,
+ ssleay_rand_add,
+#ifndef OPENSSL_NO_DEPRECATED
+ ssleay_rand_pseudo_bytes,
+#else
+ NULL, /* pseudorand slot is unused when deprecated APIs are disabled */
+#endif
+ ssleay_rand_status
+};
+
+/* Return a pointer to the built-in software RAND_METHOD implementation. */
+RAND_METHOD *RAND_SSLeay(void)
+{
+ return &rand_ssleay_meth;
+}
+
+/*
+ * Wipe all global PRNG state so that no seed material or digest output
+ * survives in memory, and mark the generator as uninitialized/empty.
+ */
+static void ssleay_rand_cleanup(void)
+{
+ OPENSSL_cleanse(state, sizeof(state)); /* secure wipe of the entropy pool */
+ state_num = 0;
+ state_index = 0;
+ OPENSSL_cleanse(md, MD_DIGEST_LENGTH); /* secure wipe of the running digest */
+ md_count[0] = 0;
+ md_count[1] = 0;
+ entropy = 0;
+ initialized = 0;
+}
+
+static int ssleay_rand_add(const void *buf, int num, double add)
+{
+ int i, j, k, st_idx;
+ long md_c[2];
+ unsigned char local_md[MD_DIGEST_LENGTH];
+ EVP_MD_CTX m;
+ int do_not_lock;
+ int rv = 0;
+
+ if (!num)
+ return 1;
+
+ /*
+ * (Based on the rand(3) manpage)
+ *
+ * The input is chopped up into units of 20 bytes (or less for
+ * the last block). Each of these blocks is run through the hash
+ * function as follows: The data passed to the hash function
+ * is the current 'md', the same number of bytes from the 'state'
+ * (the location determined by an incremented looping index) as
+ * the current 'block', the new key data 'block', and 'count'
+ * (which is incremented after each use).
+ * The result of this is kept in 'md' and also xored into the
+ * 'state' at the same locations that were used as input into the
+ * hash function.
+ */
+
+ EVP_MD_CTX_init(&m);
+ /* check if we already have the lock */
+ if (crypto_lock_rand) {
+ CRYPTO_THREADID cur;
+ CRYPTO_THREADID_current(&cur);
+ CRYPTO_r_lock(CRYPTO_LOCK_RAND2);
+ do_not_lock = !CRYPTO_THREADID_cmp(&locking_threadid, &cur);
+ CRYPTO_r_unlock(CRYPTO_LOCK_RAND2);
+ } else
+ do_not_lock = 0;
+
+ if (!do_not_lock)
+ CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+ st_idx = state_index;
+
+ /*
+ * use our own copies of the counters so that even if a concurrent thread
+ * seeds with exactly the same data and uses the same subarray there's
+ * _some_ difference
+ */
+ md_c[0] = md_count[0];
+ md_c[1] = md_count[1];
+
+ memcpy(local_md, md, sizeof md);
+
+ /* state_index <= state_num <= STATE_SIZE */
+ state_index += num;
+ if (state_index >= STATE_SIZE) {
+ state_index %= STATE_SIZE;
+ state_num = STATE_SIZE;
+ } else if (state_num < STATE_SIZE) {
+ if (state_index > state_num)
+ state_num = state_index;
+ }
+ /* state_index <= state_num <= STATE_SIZE */
+
+ /*
+ * state[st_idx], ..., state[(st_idx + num - 1) % STATE_SIZE] are what we
+ * will use now, but other threads may use them as well
+ */
+
+ md_count[1] += (num / MD_DIGEST_LENGTH) + (num % MD_DIGEST_LENGTH > 0);
+
+ if (!do_not_lock)
+ CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+
+ for (i = 0; i < num; i += MD_DIGEST_LENGTH) {
+ j = (num - i);
+ j = (j > MD_DIGEST_LENGTH) ? MD_DIGEST_LENGTH : j;
+
+ if (!MD_Init(&m))
+ goto err;
+ if (!MD_Update(&m, local_md, MD_DIGEST_LENGTH))
+ goto err;
+ k = (st_idx + j) - STATE_SIZE;
+ if (k > 0) {
+ if (!MD_Update(&m, &(state[st_idx]), j - k))
+ goto err;
+ if (!MD_Update(&m, &(state[0]), k))
+ goto err;
+ } else if (!MD_Update(&m, &(state[st_idx]), j))
+ goto err;
+
+ /* DO NOT REMOVE THE FOLLOWING CALL TO MD_Update()! */
+ if (!MD_Update(&m, buf, j))
+ goto err;
+ /*
+ * We know that line may cause programs such as purify and valgrind
+ * to complain about use of uninitialized data. The problem is not
+ * here, it's with the caller. Removing that line will make sure you get
+ * really bad randomness and thereby other problems such as very
+ * insecure keys.
+ */
+
+ if (!MD_Update(&m, (unsigned char *)&(md_c[0]), sizeof(md_c)))
+ goto err;
+ if (!MD_Final(&m, local_md))
+ goto err;
+ md_c[1]++;
+
+ buf = (const char *)buf + j;
+
+ for (k = 0; k < j; k++) {
+ /*
+ * Parallel threads may interfere with this, but always each byte
+ * of the new state is the XOR of some previous value of its and
+ * local_md (intermediate values may be lost). Always using locking
+ * could hurt performance more than necessary given that
+ * conflicts occur only when the total seeding is longer than the
+ * random state.
+ */
+ state[st_idx++] ^= local_md[k];
+ if (st_idx >= STATE_SIZE)
+ st_idx = 0;
+ }
+ }
+
+ if (!do_not_lock)
+ CRYPTO_w_lock(CRYPTO_LOCK_RAND);
+ /*
+ * Don't just copy back local_md into md -- this could mean that other
+ * thread's seeding remains without effect (except for the incremented
+ * counter). By XORing it we keep at least as much entropy as fits into
+ * md.
+ */
+ for (k = 0; k < (int)sizeof(md); k++) {
+ md[k] ^= local_md[k];
+ }
+ if (entropy < ENTROPY_NEEDED) /* stop counting when we have enough */
+ entropy += add;
+ if (!do_not_lock)
+ CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
+
+#if !defined(OPENSSL_THREADS) && !defined(OPENSSL_SYS_WIN32)
+ assert(md_c[1] == md_count[1]);