Coverage Report

Created: 2025-06-10 13:21

/bitcoin/src/random.cpp
Line | Count | Source
1
// Copyright (c) 2009-2010 Satoshi Nakamoto
2
// Copyright (c) 2009-present The Bitcoin Core developers
3
// Distributed under the MIT software license, see the accompanying
4
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6
#include <bitcoin-build-config.h> // IWYU pragma: keep
7
8
#include <random.h>
9
10
#include <compat/compat.h>
11
#include <compat/cpuid.h>
12
#include <crypto/chacha20.h>
13
#include <crypto/sha256.h>
14
#include <crypto/sha512.h>
15
#include <logging.h>
16
#include <randomenv.h>
17
#include <span.h>
18
#include <support/allocators/secure.h>
19
#include <support/cleanse.h>
20
#include <sync.h>
21
#include <util/time.h>
22
23
#include <array>
24
#include <cmath>
25
#include <cstdlib>
26
#include <optional>
27
#include <thread>
28
29
#ifdef WIN32
30
#include <bcrypt.h>
31
#else
32
#include <fcntl.h>
33
#include <sys/time.h>
34
#endif
35
36
#if defined(HAVE_GETRANDOM) || (defined(HAVE_GETENTROPY_RAND) && defined(__APPLE__))
37
#include <sys/random.h>
38
#endif
39
40
#ifdef HAVE_SYSCTL_ARND
41
#include <sys/sysctl.h>
42
#endif
43
44
namespace {
45
46
/* Number of random bytes returned by GetOSRand.
47
 * When changing this constant make sure to change all call sites, and make
48
 * sure that the underlying OS APIs for all platforms support the number.
49
 * (many cap out at 256 bytes).
50
 */
51
static const int NUM_OS_RANDOM_BYTES = 32;
52
53
54
[[noreturn]] void RandFailure()
55
0
{
56
0
    LogError("Failed to read randomness, aborting\n");
57
0
    std::abort();
58
0
}
59
60
inline int64_t GetPerformanceCounter() noexcept
61
11.6M
{
62
    // Read the hardware time stamp counter when available.
63
    // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information.
64
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
65
    return __rdtsc();
66
#elif !defined(_MSC_VER) && defined(__i386__)
67
    uint64_t r = 0;
68
    __asm__ volatile ("rdtsc" : "=A"(r)); // Constrain the r variable to the eax:edx pair.
69
    return r;
70
#elif !defined(_MSC_VER) && (defined(__x86_64__) || defined(__amd64__))
71
    uint64_t r1 = 0, r2 = 0;
72
11.6M
    __asm__ volatile ("rdtsc" : "=a"(r1), "=d"(r2)); // Constrain r1 to rax and r2 to rdx.
73
11.6M
    return (r2 << 32) | r1;
74
#else
75
    // Fall back to using standard library clock (usually microsecond or nanosecond precision)
76
    return std::chrono::high_resolution_clock::now().time_since_epoch().count();
77
#endif
78
11.6M
}
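
GetPerformanceCounter() above reads the x86 time stamp counter with inline assembly. As an illustrative sketch only (the helper name is hypothetical), the same read can be expressed with the compiler intrinsic, assuming GCC/Clang with <x86intrin.h> or MSVC with <intrin.h> on an x86/x86-64 target:

    #include <cstdint>
    #if defined(_MSC_VER)
    #include <intrin.h>    // provides __rdtsc on MSVC
    #else
    #include <x86intrin.h> // provides __rdtsc on GCC/Clang (x86/x86-64 only)
    #endif

    // Hypothetical helper, equivalent in spirit to the inline-asm path above.
    inline int64_t ReadTimeStampCounter() noexcept
    {
        return static_cast<int64_t>(__rdtsc());
    }
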
79
80
#ifdef HAVE_GETCPUID
81
bool g_rdrand_supported = false;
82
bool g_rdseed_supported = false;
83
constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000;
84
constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000;
85
#ifdef bit_RDRND
86
static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND");
87
#endif
88
#ifdef bit_RDSEED
89
static_assert(CPUID_F7_EBX_RDSEED == bit_RDSEED, "Unexpected value for bit_RDSEED");
90
#endif
91
92
void InitHardwareRand()
93
11.0k
{
94
11.0k
    uint32_t eax, ebx, ecx, edx;
95
11.0k
    GetCPUID(1, 0, eax, ebx, ecx, edx);
96
11.0k
    if (ecx & CPUID_F1_ECX_RDRAND) {
  Branch (96:9): [True: 11.0k, False: 0]
97
11.0k
        g_rdrand_supported = true;
98
11.0k
    }
99
11.0k
    GetCPUID(7, 0, eax, ebx, ecx, edx);
100
11.0k
    if (ebx & CPUID_F7_EBX_RDSEED) {
  Branch (100:9): [True: 11.0k, False: 0]
101
11.0k
        g_rdseed_supported = true;
102
11.0k
    }
103
11.0k
}
104
105
void ReportHardwareRand()
106
11.0k
{
107
    // This must be done in a separate function, as InitHardwareRand() may be indirectly called
108
    // from global constructors, before logging is initialized.
109
11.0k
    if (g_rdseed_supported) {
  Branch (109:9): [True: 11.0k, False: 0]
110
11.0k
        LogPrintf("Using RdSeed as an additional entropy source\n");
111
11.0k
    }
112
11.0k
    if (g_rdrand_supported) {
  Branch (112:9): [True: 11.0k, False: 0]
113
11.0k
        LogPrintf("Using RdRand as an additional entropy source\n");
114
11.0k
    }
115
11.0k
}
116
117
/** Read 64 bits of entropy using rdrand.
118
 *
119
 * Must only be called when RdRand is supported.
120
 */
121
uint64_t GetRdRand() noexcept
122
221k
{
123
    // RdRand may very rarely fail. Invoke it up to 10 times in a loop to reduce this risk.
124
#ifdef __i386__
125
    uint8_t ok = 0;
126
    // Initialize to 0 to silence a compiler warning that r1 or r2 may be used
127
    // uninitialized. Even if rdrand fails (!ok) it will set the output to 0,
128
    // but there is no way that the compiler could know that.
129
    uint32_t r1 = 0, r2 = 0;
130
    for (int i = 0; i < 10; ++i) {
131
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %eax
132
        if (ok) break;
133
    }
134
    for (int i = 0; i < 10; ++i) {
135
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdrand %eax
136
        if (ok) break;
137
    }
138
    return (((uint64_t)r2) << 32) | r1;
139
#elif defined(__x86_64__) || defined(__amd64__)
140
    uint8_t ok = 0;
141
221k
    uint64_t r1 = 0; // See above why we initialize to 0.
142
221k
    for (int i = 0; i < 10; ++i) {
  Branch (142:21): [True: 221k, False: 0]
143
221k
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %rax
144
221k
        if (ok) break;
  Branch (144:13): [True: 221k, False: 0]
145
221k
    }
146
221k
    return r1;
147
#else
148
#error "RdRand is only supported on x86 and x86_64"
149
#endif
150
221k
}
151
152
/** Read 64 bits of entropy using rdseed.
153
 *
154
 * Must only be called when RdSeed is supported.
155
 */
156
uint64_t GetRdSeed() noexcept
157
44.3k
{
158
    // RdSeed may fail when the HW RNG is overloaded. Loop indefinitely until enough entropy is gathered,
159
    // but pause after every failure.
160
#ifdef __i386__
161
    uint8_t ok = 0;
162
    uint32_t r1, r2;
163
    do {
164
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %eax
165
        if (ok) break;
166
        __asm__ volatile ("pause");
167
    } while(true);
168
    do {
169
        __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdseed %eax
170
        if (ok) break;
171
        __asm__ volatile ("pause");
172
    } while(true);
173
    return (((uint64_t)r2) << 32) | r1;
174
#elif defined(__x86_64__) || defined(__amd64__)
175
    uint8_t ok;
176
44.3k
    uint64_t r1;
177
139k
    do {
178
139k
        __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %rax
179
139k
        if (ok) break;
  Branch (179:13): [True: 44.3k, False: 94.6k]
180
94.6k
        __asm__ volatile ("pause");
181
94.6k
    } while(true);
  Branch (181:13): [Folded - Ignored]
182
0
    return r1;
183
#else
184
#error "RdSeed is only supported on x86 and x86_64"
185
#endif
186
44.3k
}
187
188
#else
189
/* Access to other hardware random number generators could be added here later,
190
 * assuming it is sufficiently fast (in the order of a few hundred CPU cycles).
191
 * Slower sources should probably be invoked separately, and/or only from
192
 * RandAddPeriodic (which is called once a minute).
193
 */
194
void InitHardwareRand() {}
195
void ReportHardwareRand() {}
196
#endif
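
The GetRdRand()/GetRdSeed() helpers above encode the instructions as raw .byte sequences, presumably so the file builds without enabling the RDRND/RDSEED instruction-set flags for the translation unit. Purely as an illustration (hypothetical helper names, assuming GCC/Clang with -mrdrnd -mrdseed on x86-64), the same retry loops look like this with the <immintrin.h> intrinsics:

    #include <cstdint>
    #include <immintrin.h> // _rdrand64_step / _rdseed64_step / _mm_pause

    // Hypothetical helpers mirroring GetRdRand()/GetRdSeed() via intrinsics.
    inline uint64_t RdRand64() noexcept
    {
        unsigned long long r = 0;
        for (int i = 0; i < 10; ++i) {   // rdrand may rarely fail; retry a bounded number of times
            if (_rdrand64_step(&r)) break;
        }
        return r;
    }

    inline uint64_t RdSeed64() noexcept
    {
        unsigned long long r = 0;
        while (!_rdseed64_step(&r)) {    // rdseed fails more often; spin until it succeeds
            _mm_pause();                 // back off briefly, as the asm version does
        }
        return r;
    }

Note the asymmetry mirrored from the code above: rdrand is retried a bounded number of times, while rdseed is retried indefinitely with a pause, matching the comment that it may fail when the hardware RNG is overloaded.
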
197
198
/** Add 64 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
199
221k
void SeedHardwareFast(CSHA512& hasher) noexcept {
200
221k
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
201
221k
    if (g_rdrand_supported) {
  Branch (201:9): [True: 221k, False: 0]
202
221k
        uint64_t out = GetRdRand();
203
221k
        hasher.Write((const unsigned char*)&out, sizeof(out));
204
221k
        return;
205
221k
    }
206
221k
#endif
207
221k
}
208
209
/** Add 256 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */
210
11.0k
void SeedHardwareSlow(CSHA512& hasher) noexcept {
211
11.0k
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
212
    // When we want 256 bits of entropy, prefer RdSeed over RdRand, as it's
213
    // guaranteed to produce independent randomness on every call.
214
11.0k
    if (g_rdseed_supported) {
  Branch (214:9): [True: 11.0k, False: 0]
215
55.4k
        for (int i = 0; i < 4; ++i) {
  Branch (215:25): [True: 44.3k, False: 11.0k]
216
44.3k
            uint64_t out = GetRdSeed();
217
44.3k
            hasher.Write((const unsigned char*)&out, sizeof(out));
218
44.3k
        }
219
11.0k
        return;
220
11.0k
    }
221
    // When falling back to RdRand, XOR together 1024 results.
222
    // This guarantees a reseeding occurs between each.
223
0
    if (g_rdrand_supported) {
  Branch (223:9): [True: 0, False: 0]
224
0
        for (int i = 0; i < 4; ++i) {
  Branch (224:25): [True: 0, False: 0]
225
0
            uint64_t out = 0;
226
0
            for (int j = 0; j < 1024; ++j) out ^= GetRdRand();
  Branch (226:29): [True: 0, False: 0]
227
0
            hasher.Write((const unsigned char*)&out, sizeof(out));
228
0
        }
229
0
        return;
230
0
    }
231
0
#endif
232
0
}
233
234
/** Use repeated SHA512 to strengthen the randomness in seed32, and feed into hasher. */
235
void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration dur, CSHA512& hasher) noexcept
236
11.0k
{
237
11.0k
    CSHA512 inner_hasher;
238
11.0k
    inner_hasher.Write(seed, sizeof(seed));
239
240
    // Hash loop
241
11.0k
    unsigned char buffer[64];
242
11.0k
    const auto stop{SteadyClock::now() + dur};
243
5.73M
    do {
244
5.73G
        for (int i = 0; i < 1000; ++i) {
  Branch (244:25): [True: 5.73G, False: 5.73M]
245
5.73G
            inner_hasher.Finalize(buffer);
246
5.73G
            inner_hasher.Reset();
247
5.73G
            inner_hasher.Write(buffer, sizeof(buffer));
248
5.73G
        }
249
        // Benchmark operation and feed it into outer hasher.
250
5.73M
        int64_t perf = GetPerformanceCounter();
251
5.73M
        hasher.Write((const unsigned char*)&perf, sizeof(perf));
252
5.73M
    } while (SteadyClock::now() < stop);
  Branch (252:14): [True: 5.71M, False: 11.0k]
253
254
    // Produce output from inner state and feed it to outer hasher.
255
11.0k
    inner_hasher.Finalize(buffer);
256
11.0k
    hasher.Write(buffer, sizeof(buffer));
257
    // Try to clean up.
258
11.0k
    inner_hasher.Reset();
259
11.0k
    memory_cleanse(buffer, sizeof(buffer));
260
11.0k
}
261
262
#ifndef WIN32
263
/** Fallback: get 32 bytes of system entropy from /dev/urandom. The most
264
 * compatible way to get cryptographic randomness on UNIX-ish platforms.
265
 */
266
[[maybe_unused]] void GetDevURandom(unsigned char *ent32)
267
0
{
268
0
    int f = open("/dev/urandom", O_RDONLY);
269
0
    if (f == -1) {
270
0
        RandFailure();
271
0
    }
272
0
    int have = 0;
273
0
    do {
274
0
        ssize_t n = read(f, ent32 + have, NUM_OS_RANDOM_BYTES - have);
275
0
        if (n <= 0 || n + have > NUM_OS_RANDOM_BYTES) {
276
0
            close(f);
277
0
            RandFailure();
278
0
        }
279
0
        have += n;
280
0
    } while (have < NUM_OS_RANDOM_BYTES);
281
0
    close(f);
282
0
}
283
#endif
284
285
/** Get 32 bytes of system entropy. */
286
void GetOSRand(unsigned char *ent32)
287
101k
{
288
#if defined(WIN32)
289
    constexpr uint32_t STATUS_SUCCESS{0x00000000};
290
    NTSTATUS status = BCryptGenRandom(/*hAlgorithm=*/NULL,
291
                                      /*pbBuffer=*/ent32,
292
                                      /*cbBuffer=*/NUM_OS_RANDOM_BYTES,
293
                                      /*dwFlags=*/BCRYPT_USE_SYSTEM_PREFERRED_RNG);
294
295
    if (status != STATUS_SUCCESS) {
296
        RandFailure();
297
    }
298
#elif defined(HAVE_GETRANDOM)
299
    /* Linux. From the getrandom(2) man page:
300
     * "If the urandom source has been initialized, reads of up to 256 bytes
301
     * will always return as many bytes as requested and will not be
302
     * interrupted by signals."
303
     */
304
101k
    if (getrandom(ent32, NUM_OS_RANDOM_BYTES, 0) != NUM_OS_RANDOM_BYTES) {
  Branch (304:9): [True: 0, False: 101k]
305
0
        RandFailure();
306
0
    }
307
#elif defined(__OpenBSD__)
308
    /* OpenBSD. From the arc4random(3) man page:
309
       "Use of these functions is encouraged for almost all random number
310
        consumption because the other interfaces are deficient in either
311
        quality, portability, standardization, or availability."
312
       The function call is always successful.
313
     */
314
    arc4random_buf(ent32, NUM_OS_RANDOM_BYTES);
315
#elif defined(HAVE_GETENTROPY_RAND) && defined(__APPLE__)
316
    if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
317
        RandFailure();
318
    }
319
#elif defined(HAVE_SYSCTL_ARND)
320
    /* FreeBSD, NetBSD and similar. It is possible for the call to return less
321
     * bytes than requested, so need to read in a loop.
322
     */
323
    static int name[2] = {CTL_KERN, KERN_ARND};
324
    int have = 0;
325
    do {
326
        size_t len = NUM_OS_RANDOM_BYTES - have;
327
        if (sysctl(name, std::size(name), ent32 + have, &len, nullptr, 0) != 0) {
328
            RandFailure();
329
        }
330
        have += len;
331
    } while (have < NUM_OS_RANDOM_BYTES);
332
#else
333
    /* Fall back to /dev/urandom if there is no specific method implemented to
334
     * get system entropy for this OS.
335
     */
336
    GetDevURandom(ent32);
337
#endif
338
101k
}
339
340
class RNGState {
341
    Mutex m_mutex;
342
    /* The RNG state consists of 256 bits of entropy, taken from the output of
343
     * one operation's SHA512 output, and fed as input to the next one.
344
     * Carrying 256 bits of entropy should be sufficient to guarantee
345
     * unpredictability as long as any entropy source was ever unpredictable
346
     * to an attacker. To protect against situations where an attacker might
347
     * observe the RNG's state, fresh entropy is always mixed when
348
     * GetStrongRandBytes is called.
349
     */
350
    unsigned char m_state[32] GUARDED_BY(m_mutex) = {0};
351
    uint64_t m_counter GUARDED_BY(m_mutex) = 0;
352
    bool m_strongly_seeded GUARDED_BY(m_mutex) = false;
353
354
    /** If not nullopt, the output of this RNGState is redirected and drawn from here
355
     *  (unless always_use_real_rng is passed to MixExtract). */
356
    std::optional<ChaCha20> m_deterministic_prng GUARDED_BY(m_mutex);
357
358
    Mutex m_events_mutex;
359
    CSHA256 m_events_hasher GUARDED_BY(m_events_mutex);
360
361
public:
362
    RNGState() noexcept
363
11.0k
    {
364
11.0k
        InitHardwareRand();
365
11.0k
    }
366
367
11.0k
    ~RNGState() = default;
368
369
    void AddEvent(uint32_t event_info) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
370
5.61M
    {
371
5.61M
        LOCK(m_events_mutex);
372
373
5.61M
        m_events_hasher.Write((const unsigned char *)&event_info, sizeof(event_info));
374
        // Get the low four bytes of the performance counter. This translates to roughly the
375
        // subsecond part.
376
5.61M
        uint32_t perfcounter = (GetPerformanceCounter() & 0xffffffff);
377
5.61M
        m_events_hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
378
5.61M
    }
379
380
    /**
381
     * Feed (the hash of) all events added through AddEvent() to hasher.
382
     */
383
    void SeedEvents(CSHA512& hasher) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex)
384
88.7k
    {
385
        // We use only SHA256 for the events hashing to get the ASM speedups we have for SHA256,
386
        // since we want it to be fast as network peers may be able to trigger it repeatedly.
387
88.7k
        LOCK(m_events_mutex);
388
389
88.7k
        unsigned char events_hash[32];
390
88.7k
        m_events_hasher.Finalize(events_hash);
391
88.7k
        hasher.Write(events_hash, 32);
392
393
        // Re-initialize the hasher with the finalized state to use later.
394
88.7k
        m_events_hasher.Reset();
395
88.7k
        m_events_hasher.Write(events_hash, 32);
396
88.7k
    }
397
398
    /** Make the output of MixExtract (unless always_use_real_rng) deterministic, with specified seed. */
399
    void MakeDeterministic(const uint256& seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
400
0
    {
401
0
        LOCK(m_mutex);
402
0
        m_deterministic_prng.emplace(MakeByteSpan(seed));
403
0
    }
404
405
    /** Extract up to 32 bytes of entropy from the RNG state, mixing in new entropy from hasher.
406
     *
407
     * If this function has never been called with strong_seed = true, false is returned.
408
     *
409
     * If always_use_real_rng is false, and MakeDeterministic has been called before, output
410
     * from the deterministic PRNG instead.
411
     */
412
    bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed, bool always_use_real_rng) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
413
244k
    {
414
244k
        assert(num <= 32);
  Branch (414:9): [True: 244k, False: 0]
415
244k
        unsigned char buf[64];
416
244k
        static_assert(sizeof(buf) == CSHA512::OUTPUT_SIZE, "Buffer needs to have hasher's output size");
417
244k
        bool ret;
418
244k
        {
419
244k
            LOCK(m_mutex);
420
244k
            ret = (m_strongly_seeded |= strong_seed);
421
            // Write the current state of the RNG into the hasher
422
244k
            hasher.Write(m_state, 32);
423
            // Write a new counter number into the state
424
244k
            hasher.Write((const unsigned char*)&m_counter, sizeof(m_counter));
425
244k
            ++m_counter;
426
            // Finalize the hasher
427
244k
            hasher.Finalize(buf);
428
            // Store the last 32 bytes of the hash output as new RNG state.
429
244k
            memcpy(m_state, buf + 32, 32);
430
            // Handle requests for deterministic randomness.
431
244k
            if (!always_use_real_rng && m_deterministic_prng.has_value()) [[unlikely]] {
  Branch (431:17): [True: 122k, False: 122k]
  Branch (431:41): [True: 0, False: 122k]
432
                // Overwrite the beginning of buf, which will be used for output.
433
0
                m_deterministic_prng->Keystream(std::as_writable_bytes(std::span{buf, num}));
434
                // Do not require strong seeding for deterministic output.
435
0
                ret = true;
436
0
            }
437
244k
        }
438
        // If desired, copy (up to) the first 32 bytes of the hash output as output.
439
244k
        if (num) {
  Branch (439:13): [True: 210k, False: 33.2k]
440
210k
            assert(out != nullptr);
  Branch (440:13): [True: 210k, False: 0]
441
210k
            memcpy(out, buf, num);
442
210k
        }
443
        // Best effort cleanup of internal state
444
244k
        hasher.Reset();
445
244k
        memory_cleanse(buf, 64);
446
244k
        return ret;
447
244k
    }
448
};
449
450
RNGState& GetRNGState() noexcept
451
5.83M
{
452
    // This idiom relies on the guarantee that static variables are initialized
453
    // on first call, even when multiple parallel calls are permitted.
454
5.83M
    static std::vector<RNGState, secure_allocator<RNGState>> g_rng(1);
455
5.83M
    return g_rng[0];
456
5.83M
}
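
The comment above refers to the C++11 guarantee of thread-safe, one-time initialization of function-local statics. A minimal sketch of the idiom in isolation, with a hypothetical example type:

    #include <string>

    struct Settings { std::string datadir{"/tmp"}; }; // hypothetical example type

    Settings& GetSettings()
    {
        // Since C++11, a function-local static is initialized exactly once,
        // even when several threads make the first call concurrently.
        static Settings settings;
        return settings;
    }

The code in the report additionally routes the allocation through secure_allocator (via the std::vector) so that the RNG state lives in memory that is cleansed when freed.
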
457
458
/* A note on the use of noexcept in the seeding functions below:
459
 *
460
 * None of the RNG code should ever throw any exception.
461
 */
462
463
void SeedTimestamp(CSHA512& hasher) noexcept
464
310k
{
465
310k
    int64_t perfcounter = GetPerformanceCounter();
466
310k
    hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter));
467
310k
}
468
469
void SeedFast(CSHA512& hasher) noexcept
470
221k
{
471
221k
    unsigned char buffer[32];
472
473
    // Stack pointer to indirectly commit to thread/callstack
474
221k
    const unsigned char* ptr = buffer;
475
221k
    hasher.Write((const unsigned char*)&ptr, sizeof(ptr));
476
477
    // Hardware randomness is very fast when available; use it always.
478
221k
    SeedHardwareFast(hasher);
479
480
    // High-precision timestamp
481
221k
    SeedTimestamp(hasher);
482
221k
}
483
484
void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept
485
88.7k
{
486
88.7k
    unsigned char buffer[32];
487
488
    // Everything that the 'fast' seeder includes
489
88.7k
    SeedFast(hasher);
490
491
    // OS randomness
492
88.7k
    GetOSRand(buffer);
493
88.7k
    hasher.Write(buffer, sizeof(buffer));
494
495
    // Add the events hasher into the mix
496
88.7k
    rng.SeedEvents(hasher);
497
498
    // High-precision timestamp.
499
    //
500
    // Note that we also commit to a timestamp in the Fast seeder, so we indirectly commit to a
501
    // benchmark of all the entropy gathering sources in this function.
502
88.7k
    SeedTimestamp(hasher);
503
88.7k
}
504
505
/** Extract entropy from rng, strengthen it, and feed it into hasher. */
506
void SeedStrengthen(CSHA512& hasher, RNGState& rng, SteadyClock::duration dur) noexcept
507
11.0k
{
508
    // Generate 32 bytes of entropy from the RNG, and a copy of the entropy already in hasher.
509
    // Never use the deterministic PRNG for this, as the result is only used internally.
510
11.0k
    unsigned char strengthen_seed[32];
511
11.0k
    rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false, /*always_use_real_rng=*/true);
512
    // Strengthen the seed, and feed it into hasher.
513
11.0k
    Strengthen(strengthen_seed, dur, hasher);
514
11.0k
}
515
516
void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept
517
0
{
518
    // Everything that the 'fast' seeder includes
519
0
    SeedFast(hasher);
520
521
    // High-precision timestamp
522
0
    SeedTimestamp(hasher);
523
524
    // Add the events hasher into the mix
525
0
    rng.SeedEvents(hasher);
526
527
    // Dynamic environment data (clocks, resource usage, ...)
528
0
    auto old_size = hasher.Size();
529
0
    RandAddDynamicEnv(hasher);
530
0
    LogDebug(BCLog::RAND, "Feeding %i bytes of dynamic environment data into RNG\n", hasher.Size() - old_size);
531
532
    // Strengthen for 10 ms
533
0
    SeedStrengthen(hasher, rng, 10ms);
534
0
}
535
536
void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept
537
11.0k
{
538
    // Gather 256 bits of hardware randomness, if available
539
11.0k
    SeedHardwareSlow(hasher);
540
541
    // Everything that the 'slow' seeder includes.
542
11.0k
    SeedSlow(hasher, rng);
543
544
    // Dynamic environment data (clocks, resource usage, ...)
545
11.0k
    auto old_size = hasher.Size();
546
11.0k
    RandAddDynamicEnv(hasher);
547
548
    // Static environment data
549
11.0k
    RandAddStaticEnv(hasher);
550
11.0k
    LogDebug(BCLog::RAND, "Feeding %i bytes of environment data into RNG\n", hasher.Size() - old_size);
551
552
    // Strengthen for 100 ms
553
11.0k
    SeedStrengthen(hasher, rng, 100ms);
554
11.0k
}
555
556
enum class RNGLevel {
557
    FAST, //!< Automatically called by GetRandBytes
558
    SLOW, //!< Automatically called by GetStrongRandBytes
559
    PERIODIC, //!< Called by RandAddPeriodic()
560
};
561
562
void ProcRand(unsigned char* out, int num, RNGLevel level, bool always_use_real_rng) noexcept
563
210k
{
564
    // Make sure the RNG is initialized first (as all Seed* functions may need hwrand to be available).
565
210k
    RNGState& rng = GetRNGState();
566
567
210k
    assert(num <= 32);
  Branch (567:5): [True: 210k, False: 0]
568
569
210k
    CSHA512 hasher;
570
210k
    switch (level) {
  Branch (570:13): [True: 0, False: 210k]
571
133k
    case RNGLevel::FAST:
  Branch (571:5): [True: 133k, False: 77.6k]
572
133k
        SeedFast(hasher);
573
133k
        break;
574
77.6k
    case RNGLevel::SLOW:
  Branch (574:5): [True: 77.6k, False: 133k]
575
77.6k
        SeedSlow(hasher, rng);
576
77.6k
        break;
577
0
    case RNGLevel::PERIODIC:
  Branch (577:5): [True: 0, False: 210k]
578
0
        SeedPeriodic(hasher, rng);
579
0
        break;
580
210k
    }
581
582
    // Combine with and update state
583
210k
    if (!rng.MixExtract(out, num, std::move(hasher), false, always_use_real_rng)) {
  Branch (583:9): [True: 11.0k, False: 199k]
584
        // On the first invocation, also seed with SeedStartup().
585
11.0k
        CSHA512 startup_hasher;
586
11.0k
        SeedStartup(startup_hasher, rng);
587
11.0k
        rng.MixExtract(out, num, std::move(startup_hasher), true, always_use_real_rng);
588
11.0k
    }
589
210k
}
590
591
} // namespace
592
593
594
/** Internal function to set g_deterministic_rng. Only accessed from tests. */
595
void MakeRandDeterministicDANGEROUS(const uint256& seed) noexcept
596
0
{
597
0
    GetRNGState().MakeDeterministic(seed);
598
0
}
599
std::atomic<bool> g_used_g_prng{false}; // Only accessed from tests
600
601
void GetRandBytes(std::span<unsigned char> bytes) noexcept
602
122k
{
603
122k
    g_used_g_prng = true;
604
122k
    ProcRand(bytes.data(), bytes.size(), RNGLevel::FAST, /*always_use_real_rng=*/false);
605
122k
}
606
607
void GetStrongRandBytes(std::span<unsigned char> bytes) noexcept
608
77.6k
{
609
77.6k
    ProcRand(bytes.data(), bytes.size(), RNGLevel::SLOW, /*always_use_real_rng=*/true);
610
77.6k
}
611
612
void RandAddPeriodic() noexcept
613
0
{
614
0
    ProcRand(nullptr, 0, RNGLevel::PERIODIC, /*always_use_real_rng=*/false);
615
0
}
616
617
5.61M
void RandAddEvent(const uint32_t event_info) noexcept { GetRNGState().AddEvent(event_info); }
618
619
void FastRandomContext::RandomSeed() noexcept
620
0
{
621
0
    uint256 seed = GetRandHash();
622
0
    rng.SetKey(MakeByteSpan(seed));
623
0
    requires_seed = false;
624
0
}
625
626
void FastRandomContext::fillrand(std::span<std::byte> output) noexcept
627
88.7k
{
628
88.7k
    if (requires_seed) RandomSeed();
  Branch (628:9): [True: 0, False: 88.7k]
629
88.7k
    rng.Keystream(output);
630
88.7k
}
631
632
0
FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), rng(MakeByteSpan(seed)) {}
633
634
void FastRandomContext::Reseed(const uint256& seed) noexcept
635
0
{
636
0
    FlushCache();
637
0
    requires_seed = false;
638
0
    rng = {MakeByteSpan(seed)};
639
0
}
640
641
bool Random_SanityCheck()
642
11.0k
{
643
11.0k
    uint64_t start = GetPerformanceCounter();
644
645
    /* This does not measure the quality of randomness, but it does test that
646
     * GetOSRand() overwrites all 32 bytes of the output given a maximum
647
     * number of tries.
648
     */
649
11.0k
    static constexpr int MAX_TRIES{1024};
650
11.0k
    uint8_t data[NUM_OS_RANDOM_BYTES];
651
11.0k
    bool overwritten[NUM_OS_RANDOM_BYTES] = {}; /* Tracks which bytes have been overwritten at least once */
652
11.0k
    int num_overwritten;
653
11.0k
    int tries = 0;
654
    /* Loop until all bytes have been overwritten at least once, or the maximum number of tries is reached */
655
12.3k
    do {
656
12.3k
        memset(data, 0, NUM_OS_RANDOM_BYTES);
657
12.3k
        GetOSRand(data);
658
408k
        for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
  Branch (658:23): [True: 395k, False: 12.3k]
659
395k
            overwritten[x] |= (data[x] != 0);
660
395k
        }
661
662
12.3k
        num_overwritten = 0;
663
408k
        for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) {
  Branch (663:23): [True: 395k, False: 12.3k]
664
395k
            if (overwritten[x]) {
  Branch (664:17): [True: 394k, False: 1.35k]
665
394k
                num_overwritten += 1;
666
394k
            }
667
395k
        }
668
669
12.3k
        tries += 1;
670
12.3k
    } while (num_overwritten < NUM_OS_RANDOM_BYTES && tries < MAX_TRIES);
  Branch (670:14): [True: 1.28k, False: 11.0k]
  Branch (670:55): [True: 1.28k, False: 0]
671
11.0k
    if (num_overwritten != NUM_OS_RANDOM_BYTES) return false; /* If this failed, we bailed out after too many tries */
  Branch (671:9): [True: 0, False: 11.0k]
672
673
    // Check that GetPerformanceCounter increases at least during a GetOSRand() call + 1ms sleep.
674
11.0k
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
675
11.0k
    uint64_t stop = GetPerformanceCounter();
676
11.0k
    if (stop == start) return false;
  Branch (676:9): [True: 0, False: 11.0k]
677
678
    // We called GetPerformanceCounter. Use it as entropy.
679
11.0k
    CSHA512 to_add;
680
11.0k
    to_add.Write((const unsigned char*)&start, sizeof(start));
681
11.0k
    to_add.Write((const unsigned char*)&stop, sizeof(stop));
682
11.0k
    GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false, /*always_use_real_rng=*/true);
683
684
11.0k
    return true;
685
11.0k
}
686
687
static constexpr std::array<std::byte, ChaCha20::KEYLEN> ZERO_KEY{};
688
689
48.5M
FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), rng(ZERO_KEY)
690
48.5M
{
691
    // Note that despite always initializing with ZERO_KEY, requires_seed is set to true if not
692
    // fDeterministic. That means the rng will be reinitialized with a secure random key upon first
693
    // use.
694
48.5M
#ifdef FUZZAMOTO_FUZZING
695
    // Aggressively stub out rng when fuzzing with fuzzamoto. Simply using
696
    // MakeRandDeterministicDANGEROUS is not enough as thread scheduling still
697
    // makes it non-deterministic overall.
698
48.5M
    //requires_seed = false;
699
    // TODO: provide a seed e.g. via env variable to produce deterministic
700
    // but different results.
701
48.5M
#endif
702
48.5M
}
703
704
void RandomInit()
705
11.0k
{
706
    // Invoke RNG code to trigger initialization (if not already performed)
707
11.0k
    ProcRand(nullptr, 0, RNGLevel::FAST, /*always_use_real_rng=*/true);
708
709
11.0k
    ReportHardwareRand();
710
11.0k
}
711
712
double MakeExponentiallyDistributed(uint64_t uniform) noexcept
713
3.91M
{
714
    // To convert uniform into an exponentially-distributed double, we use two steps:
715
    // - Convert uniform into a uniformly-distributed double in range [0, 1), use the expression
716
    //   ((uniform >> 11) * 0x1.0p-53), as described in https://prng.di.unimi.it/ under
717
    //   "Generating uniform doubles in the unit interval". Call this value x.
718
    // - Given an x in uniformly distributed in [0, 1), we find an exponentially distributed value
719
    //   by applying the quantile function to it. For the exponential distribution with mean 1 this
720
    //   is F(x) = -log(1 - x).
721
    //
722
    // Combining the two, and using log1p(x) = log(1 + x), we obtain the following:
723
3.91M
    return -std::log1p((uniform >> 11) * -0x1.0p-53);
724
3.91M
}
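
To make the transform concrete, here is a small standalone check (the function body is copied from the listing; std::mt19937_64 stands in for the codebase's RNG purely for illustration). Feeding uniformly distributed 64-bit integers through it should yield samples whose mean is close to 1, the mean of the unit exponential distribution; scaling the result by a desired average interval gives exponentially distributed waiting times with that mean.

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <random>

    double MakeExponentiallyDistributed(uint64_t uniform) noexcept
    {
        return -std::log1p((uniform >> 11) * -0x1.0p-53);
    }

    int main()
    {
        std::mt19937_64 gen{42};
        constexpr int N = 1'000'000;
        double sum = 0.0;
        for (int i = 0; i < N; ++i) sum += MakeExponentiallyDistributed(gen());
        // The mean of an Exponential(1) variable is 1, so this prints a value near 1.0.
        std::printf("sample mean: %f\n", sum / N);
    }
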