Coverage Report

Created: 2025-06-10 13:21

/bitcoin/src/index/base.cpp
 Line|  Count|Source
    1|       |// Copyright (c) 2017-present The Bitcoin Core developers
    2|       |// Distributed under the MIT software license, see the accompanying
    3|       |// file COPYING or http://www.opensource.org/licenses/mit-license.php.
    4|       |
    5|       |#include <chainparams.h>
    6|       |#include <common/args.h>
    7|       |#include <index/base.h>
    8|       |#include <interfaces/chain.h>
    9|       |#include <kernel/chain.h>
   10|       |#include <logging.h>
   11|       |#include <node/abort.h>
   12|       |#include <node/blockstorage.h>
   13|       |#include <node/context.h>
   14|       |#include <node/database_args.h>
   15|       |#include <node/interface_ui.h>
   16|       |#include <tinyformat.h>
   17|       |#include <util/string.h>
   18|       |#include <util/thread.h>
   19|       |#include <util/translation.h>
   20|       |#include <validation.h>
   21|       |
   22|       |#include <chrono>
   23|       |#include <memory>
   24|       |#include <optional>
   25|       |#include <stdexcept>
   26|       |#include <string>
   27|       |#include <thread>
   28|       |#include <utility>
   29|       |
   30|       |constexpr uint8_t DB_BEST_BLOCK{'B'};
   31|       |
   32|       |constexpr auto SYNC_LOG_INTERVAL{30s};
   33|       |constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};
   34|       |
   35|       |template <typename... Args>
   36|       |void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
   37|      0|{
   38|      0|    auto message = tfm::format(fmt, args...);
   39|      0|    node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
   40|      0|}
Unexecuted instantiation: void BaseIndex::FatalErrorf<char [5], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >(util::ConstevalFormatString<2>, char const (&) [5], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)
Unexecuted instantiation: void BaseIndex::FatalErrorf<char [15], int>(util::ConstevalFormatString<2>, char const (&) [15], int const&)
Unexecuted instantiation: void BaseIndex::FatalErrorf<char [15], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >(util::ConstevalFormatString<2>, char const (&) [15], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)
Unexecuted instantiation: void BaseIndex::FatalErrorf<char [18], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >(util::ConstevalFormatString<2>, char const (&) [18], std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)
   41|       |
   42|       |CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
   43|  20.6k|{
   44|  20.6k|    CBlockLocator locator;
   45|  20.6k|    bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator));
   46|  20.6k|    assert(found);
  Branch (46:5): [True: 20.6k, False: 0]
   47|  20.6k|    assert(!locator.IsNull());
  Branch (47:5): [True: 20.6k, False: 0]
   48|  20.6k|    return locator;
   49|  20.6k|}
   50|       |
   51|       |BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
   52|  11.0k|    CDBWrapper{DBParams{
   53|  11.0k|        .path = path,
   54|  11.0k|        .cache_bytes = n_cache_size,
   55|  11.0k|        .memory_only = f_memory,
   56|  11.0k|        .wipe_data = f_wipe,
   57|  11.0k|        .obfuscate = f_obfuscate,
   58|  11.0k|        .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
   59|  11.0k|{}
   60|       |
   61|       |bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
   62|  11.0k|{
   63|  11.0k|    bool success = Read(DB_BEST_BLOCK, locator);
   64|  11.0k|    if (!success) {
  Branch (64:9): [True: 11.0k, False: 0]
   65|  11.0k|        locator.SetNull();
   66|  11.0k|    }
   67|  11.0k|    return success;
   68|  11.0k|}
   69|       |
   70|       |void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
   71|  20.6k|{
   72|  20.6k|    batch.Write(DB_BEST_BLOCK, locator);
   73|  20.6k|}
   74|       |
   75|       |BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
   76|  11.0k|    : m_chain{std::move(chain)}, m_name{std::move(name)} {}
   77|       |
   78|       |BaseIndex::~BaseIndex()
   79|  11.0k|{
   80|  11.0k|    Interrupt();
   81|  11.0k|    Stop();
   82|  11.0k|}
   83|       |
   84|       |bool BaseIndex::Init()
   85|  11.0k|{
   86|  11.0k|    AssertLockNotHeld(cs_main);
   87|       |
   88|       |    // May need reset if index is being restarted.
   89|  11.0k|    m_interrupt.reset();
   90|       |
   91|       |    // m_chainstate member gives indexing code access to node internals. It is
   92|       |    // removed in followup https://github.com/bitcoin/bitcoin/pull/24230
   93|  11.0k|    m_chainstate = WITH_LOCK(::cs_main,
   94|  11.0k|        return &m_chain->context()->chainman->GetChainstateForIndexing());
   95|       |    // Register to validation interface before setting the 'm_synced' flag, so that
   96|       |    // callbacks are not missed once m_synced is true.
   97|  11.0k|    m_chain->context()->validation_signals->RegisterValidationInterface(this);
   98|       |
   99|  11.0k|    CBlockLocator locator;
  100|  11.0k|    if (!GetDB().ReadBestBlock(locator)) {
  Branch (100:9): [True: 11.0k, False: 0]
  101|  11.0k|        locator.SetNull();
  102|  11.0k|    }
  103|       |
  104|  11.0k|    LOCK(cs_main);
  105|  11.0k|    CChain& index_chain = m_chainstate->m_chain;
  106|       |
  107|  11.0k|    if (locator.IsNull()) {
  Branch (107:9): [True: 11.0k, False: 0]
  108|  11.0k|        SetBestBlockIndex(nullptr);
  109|  11.0k|    } else {
  110|       |        // Setting the best block to the locator's top block. If it is not part of the
  111|       |        // best chain, we will rewind to the fork point during index sync
  112|      0|        const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))};
  113|      0|        if (!locator_index) {
  Branch (113:13): [True: 0, False: 0]
  114|      0|            return InitError(Untranslated(strprintf("%s: best block of the index not found. Please rebuild the index.", GetName())));
  115|      0|        }
  116|      0|        SetBestBlockIndex(locator_index);
  117|      0|    }
  118|       |
  119|       |    // Child init
  120|  11.0k|    const CBlockIndex* start_block = m_best_block_index.load();
  121|  11.0k|    if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
  Branch (121:9): [True: 0, False: 11.0k]
  Branch (121:21): [True: 0, False: 11.0k]
  122|      0|        return false;
  123|      0|    }
  124|       |
  125|       |    // Note: this will latch to true immediately if the user starts up with an empty
  126|       |    // datadir and an index enabled. If this is the case, indexation will happen solely
  127|       |    // via `BlockConnected` signals until, possibly, the next restart.
  128|  11.0k|    m_synced = start_block == index_chain.Tip();
  129|  11.0k|    m_init = true;
  130|  11.0k|    return true;
  131|  11.0k|}
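
The hooks exercised above (CustomInit at line 121, plus CustomAppend, CustomCommit, CustomRewind, GetDB and AllowPrune used later in this file) are the surface a concrete index implements on top of BaseIndex. The following is an illustrative sketch only and is not part of the measured source above or an index that ships with Bitcoin Core: the class name, the hash-to-height payload and the datadir subdirectory are made up, the overridden signatures follow index/base.h and the call sites visible in this listing, and the includes at the top of this file are assumed.

// Hypothetical BaseIndex subclass, sketched for illustration.
class HeightIndex final : public BaseIndex
{
    std::unique_ptr<BaseIndex::DB> m_db;

protected:
    // Called from Init() (line 121) with the best block recorded on disk,
    // or std::nullopt on a fresh start.
    bool CustomInit(const std::optional<interfaces::BlockRef>& block) override { return true; }

    // Called from Sync() (line 203) and BlockConnected() (line 320) for every block to index.
    bool CustomAppend(const interfaces::BlockInfo& block) override
    {
        return m_db->Write(block.hash, block.height); // hypothetical payload: block hash -> height
    }

    // Called from Commit() (line 239); anything added to this batch is written in the same
    // CDBWrapper batch as DB_BEST_BLOCK, so index data and locator land atomically.
    bool CustomCommit(CDBBatch& batch) override { return true; }

    DB& GetDB() const override { return *m_db; }

public:
    HeightIndex(std::unique_ptr<interfaces::Chain> chain, size_t cache_size, bool memory, bool wipe)
        : BaseIndex(std::move(chain), "heightindex"),
          m_db(std::make_unique<BaseIndex::DB>(gArgs.GetDataDirNet() / "indexes" / "heightindex",
                                               cache_size, memory, wipe, /*f_obfuscate=*/false)) {}

    bool AllowPrune() const override { return false; }
};
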
  132|       |
  133|       |static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
  134|      0|{
  135|      0|    AssertLockHeld(cs_main);
  136|       |
  137|      0|    if (!pindex_prev) {
  Branch (137:9): [True: 0, False: 0]
  138|      0|        return chain.Genesis();
  139|      0|    }
  140|       |
  141|      0|    const CBlockIndex* pindex = chain.Next(pindex_prev);
  142|      0|    if (pindex) {
  Branch (142:9): [True: 0, False: 0]
  143|      0|        return pindex;
  144|      0|    }
  145|       |
  146|      0|    return chain.Next(chain.FindFork(pindex_prev));
  147|      0|}
  148|       |
  149|       |void BaseIndex::Sync()
  150|  11.0k|{
  151|  11.0k|    const CBlockIndex* pindex = m_best_block_index.load();
  152|  11.0k|    if (!m_synced) {
  Branch (152:9): [True: 0, False: 11.0k]
  153|      0|        std::chrono::steady_clock::time_point last_log_time{0s};
  154|      0|        std::chrono::steady_clock::time_point last_locator_write_time{0s};
  155|      0|        while (true) {
  Branch (155:16): [Folded - Ignored]
  156|      0|            if (m_interrupt) {
  Branch (156:17): [True: 0, False: 0]
  157|      0|                LogPrintf("%s: m_interrupt set; exiting ThreadSync\n", GetName());
  158|       |
  159|      0|                SetBestBlockIndex(pindex);
  160|       |                // No need to handle errors in Commit. If it fails, the error will be already be
  161|       |                // logged. The best way to recover is to continue, as index cannot be corrupted by
  162|       |                // a missed commit to disk for an advanced index state.
  163|      0|                Commit();
  164|      0|                return;
  165|      0|            }
  166|       |
  167|      0|            const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain));
  168|       |            // If pindex_next is null, it means pindex is the chain tip, so
  169|       |            // commit data indexed so far.
  170|      0|            if (!pindex_next) {
  Branch (170:17): [True: 0, False: 0]
  171|      0|                SetBestBlockIndex(pindex);
  172|       |                // No need to handle errors in Commit. See rationale above.
  173|      0|                Commit();
  174|       |
  175|       |                // If pindex is still the chain tip after committing, exit the
  176|       |                // sync loop. It is important for cs_main to be locked while
  177|       |                // setting m_synced = true, otherwise a new block could be
  178|       |                // attached while m_synced is still false, and it would not be
  179|       |                // indexed.
  180|      0|                LOCK(::cs_main);
  181|      0|                pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
  182|      0|                if (!pindex_next) {
  Branch (182:21): [True: 0, False: 0]
  183|      0|                    m_synced = true;
  184|      0|                    break;
  185|      0|                }
  186|      0|            }
  187|      0|            if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
  Branch (187:17): [True: 0, False: 0]
  Branch (187:49): [True: 0, False: 0]
  188|      0|                FatalErrorf("%s: Failed to rewind index %s to a previous chain tip", __func__, GetName());
  189|      0|                return;
  190|      0|            }
  191|      0|            pindex = pindex_next;
  192|       |
  193|       |
  194|      0|            CBlock block;
  195|      0|            interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex);
  196|      0|            if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) {
  Branch (196:17): [True: 0, False: 0]
  197|      0|                FatalErrorf("%s: Failed to read block %s from disk",
  198|      0|                           __func__, pindex->GetBlockHash().ToString());
  199|      0|                return;
  200|      0|            } else {
  201|      0|                block_info.data = &block;
  202|      0|            }
  203|      0|            if (!CustomAppend(block_info)) {
  Branch (203:17): [True: 0, False: 0]
  204|      0|                FatalErrorf("%s: Failed to write block %s to index database",
  205|      0|                           __func__, pindex->GetBlockHash().ToString());
  206|      0|                return;
  207|      0|            }
  208|       |
  209|      0|            auto current_time{std::chrono::steady_clock::now()};
  210|      0|            if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
  Branch (210:17): [True: 0, False: 0]
  211|      0|                LogPrintf("Syncing %s with block chain from height %d\n",
  212|      0|                          GetName(), pindex->nHeight);
  213|      0|                last_log_time = current_time;
  214|      0|            }
  215|       |
  216|      0|            if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
  Branch (216:17): [True: 0, False: 0]
  217|      0|                SetBestBlockIndex(pindex);
  218|      0|                last_locator_write_time = current_time;
  219|       |                // No need to handle errors in Commit. See rationale above.
  220|      0|                Commit();
  221|      0|            }
  222|      0|        }
  223|      0|    }
  224|       |
  225|  11.0k|    if (pindex) {
  Branch (225:9): [True: 37, False: 11.0k]
  226|     37|        LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
  227|  11.0k|    } else {
  228|  11.0k|        LogPrintf("%s is enabled\n", GetName());
  229|  11.0k|    }
  230|  11.0k|}
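
Sync() above (lines 149-230) rate-limits its side work: every loop iteration indexes one block, but progress is logged at most once per SYNC_LOG_INTERVAL and the best-block locator is persisted at most once per SYNC_LOCATOR_WRITE_INTERVAL (both 30s, lines 32-33). The following standalone sketch, not part of the measured source, shows that throttling pattern with hypothetical stand-ins for the indexing and commit steps; only the std::chrono usage mirrors the code above.

#include <chrono>
#include <cstdio>
#include <functional>

using namespace std::chrono_literals;

// do_work: cf. NextSyncBlock() + CustomAppend(); returns false once caught up.
// checkpoint: cf. SetBestBlockIndex() + Commit().
void ThrottledLoop(const std::function<bool()>& do_work,
                   const std::function<void()>& checkpoint)
{
    constexpr auto LOG_INTERVAL{30s};
    constexpr auto CHECKPOINT_INTERVAL{30s};
    std::chrono::steady_clock::time_point last_log_time{0s};
    std::chrono::steady_clock::time_point last_checkpoint_time{0s};

    while (do_work()) {
        const auto now{std::chrono::steady_clock::now()};
        if (last_log_time + LOG_INTERVAL < now) {            // progress log, at most every 30s
            std::puts("still syncing...");
            last_log_time = now;
        }
        if (last_checkpoint_time + CHECKPOINT_INTERVAL < now) { // durable checkpoint, at most every 30s
            checkpoint();
            last_checkpoint_time = now;
        }
    }
    checkpoint(); // one final checkpoint once caught up, as Sync() commits before setting m_synced
}
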
  231|       |
  232|       |bool BaseIndex::Commit()
  233|  20.6k|{
  234|       |    // Don't commit anything if we haven't indexed any block yet
  235|       |    // (this could happen if init is interrupted).
  236|  20.6k|    bool ok = m_best_block_index != nullptr;
  237|  20.6k|    if (ok) {
  Branch (237:9): [True: 20.6k, False: 0]
  238|  20.6k|        CDBBatch batch(GetDB());
  239|  20.6k|        ok = CustomCommit(batch);
  240|  20.6k|        if (ok) {
  Branch (240:13): [True: 20.6k, False: 0]
  241|  20.6k|            GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
  242|  20.6k|            ok = GetDB().WriteBatch(batch);
  243|  20.6k|        }
  244|  20.6k|    }
  245|  20.6k|    if (!ok) {
  Branch (245:9): [True: 0, False: 20.6k]
  246|      0|        LogError("%s: Failed to commit latest %s state\n", __func__, GetName());
  247|      0|        return false;
  248|      0|    }
  249|  20.6k|    return true;
  250|  20.6k|}
  251|       |
  252|       |bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
  253|  2.62k|{
  254|  2.62k|    assert(current_tip == m_best_block_index);
  Branch (254:5): [True: 2.62k, False: 0]
  255|  2.62k|    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
  Branch (255:5): [True: 2.62k, False: 0]
  256|       |
  257|  2.62k|    if (!CustomRewind({current_tip->GetBlockHash(), current_tip->nHeight}, {new_tip->GetBlockHash(), new_tip->nHeight})) {
  Branch (257:9): [True: 0, False: 2.62k]
  258|      0|        return false;
  259|      0|    }
  260|       |
  261|       |    // In the case of a reorg, ensure persisted block locator is not stale.
  262|       |    // Pruning has a minimum of 288 blocks-to-keep and getting the index
  263|       |    // out of sync may be possible but a users fault.
  264|       |    // In case we reorg beyond the pruned depth, ReadBlock would
  265|       |    // throw and lead to a graceful shutdown
  266|  2.62k|    SetBestBlockIndex(new_tip);
  267|  2.62k|    if (!Commit()) {
  Branch (267:9): [True: 0, False: 2.62k]
  268|       |        // If commit fails, revert the best block index to avoid corruption.
  269|      0|        SetBestBlockIndex(current_tip);
  270|      0|        return false;
  271|      0|    }
  272|       |
  273|  2.62k|    return true;
  274|  2.62k|}
  275|       |
  276|       |void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
  277|  2.23M|{
  278|       |    // Ignore events from the assumed-valid chain; we will process its blocks
  279|       |    // (sequentially) after it is fully verified by the background chainstate. This
  280|       |    // is to avoid any out-of-order indexing.
  281|       |    //
  282|       |    // TODO at some point we could parameterize whether a particular index can be
  283|       |    // built out of order, but for now just do the conservative simple thing.
  284|  2.23M|    if (role == ChainstateRole::ASSUMEDVALID) {
  Branch (284:9): [True: 0, False: 2.23M]
  285|      0|        return;
  286|      0|    }
  287|       |
  288|       |    // Ignore BlockConnected signals until we have fully indexed the chain.
  289|  2.23M|    if (!m_synced) {
  Branch (289:9): [True: 0, False: 2.23M]
  290|      0|        return;
  291|      0|    }
  292|       |
  293|  2.23M|    const CBlockIndex* best_block_index = m_best_block_index.load();
  294|  2.23M|    if (!best_block_index) {
  Branch (294:9): [True: 11.0k, False: 2.22M]
  295|  11.0k|        if (pindex->nHeight != 0) {
  Branch (295:13): [True: 0, False: 11.0k]
  296|      0|            FatalErrorf("%s: First block connected is not the genesis block (height=%d)",
  297|      0|                       __func__, pindex->nHeight);
  298|      0|            return;
  299|      0|        }
  300|  2.22M|    } else {
  301|       |        // Ensure block connects to an ancestor of the current best block. This should be the case
  302|       |        // most of the time, but may not be immediately after the sync thread catches up and sets
  303|       |        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
  304|       |        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
  305|       |        // new chain tip. In this unlikely event, log a warning and let the queue clear.
  306|  2.22M|        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
  Branch (306:13): [True: 0, False: 2.22M]
  307|      0|            LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of "
  308|      0|                      "known best chain (tip=%s); not updating index\n",
  309|      0|                      __func__, pindex->GetBlockHash().ToString(),
  310|      0|                      best_block_index->GetBlockHash().ToString());
  311|      0|            return;
  312|      0|        }
  313|  2.22M|        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
  Branch (313:13): [True: 2.62k, False: 2.22M]
  Branch (313:50): [True: 0, False: 2.62k]
  314|      0|            FatalErrorf("%s: Failed to rewind index %s to a previous chain tip",
  315|      0|                       __func__, GetName());
  316|      0|            return;
  317|      0|        }
  318|  2.22M|    }
  319|  2.23M|    interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block.get());
  320|  2.23M|    if (CustomAppend(block_info)) {
  Branch (320:9): [True: 2.23M, False: 0]
  321|       |        // Setting the best block index is intentionally the last step of this
  322|       |        // function, so BlockUntilSyncedToCurrentChain callers waiting for the
  323|       |        // best block index to be updated can rely on the block being fully
  324|       |        // processed, and the index object being safe to delete.
  325|  2.23M|        SetBestBlockIndex(pindex);
  326|  2.23M|    } else {
  327|      0|        FatalErrorf("%s: Failed to write block %s to index",
  328|      0|                   __func__, pindex->GetBlockHash().ToString());
  329|      0|        return;
  330|      0|    }
  331|  2.23M|}
  332|       |
  333|       |void BaseIndex::ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator)
  334|  18.1k|{
  335|       |    // Ignore events from the assumed-valid chain; we will process its blocks
  336|       |    // (sequentially) after it is fully verified by the background chainstate.
  337|  18.1k|    if (role == ChainstateRole::ASSUMEDVALID) {
  Branch (337:9): [True: 0, False: 18.1k]
  338|      0|        return;
  339|      0|    }
  340|       |
  341|  18.1k|    if (!m_synced) {
  Branch (341:9): [True: 0, False: 18.1k]
  342|      0|        return;
  343|      0|    }
  344|       |
  345|  18.1k|    const uint256& locator_tip_hash = locator.vHave.front();
  346|  18.1k|    const CBlockIndex* locator_tip_index;
  347|  18.1k|    {
  348|  18.1k|        LOCK(cs_main);
  349|  18.1k|        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
  350|  18.1k|    }
  351|       |
  352|  18.1k|    if (!locator_tip_index) {
  Branch (352:9): [True: 0, False: 18.1k]
  353|      0|        FatalErrorf("%s: First block (hash=%s) in locator was not found",
  354|      0|                   __func__, locator_tip_hash.ToString());
  355|      0|        return;
  356|      0|    }
  357|       |
  358|       |    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
  359|       |    // immediately after the sync thread catches up and sets m_synced. Consider the case where
  360|       |    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
  361|       |    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
  362|       |    // event, log a warning and let the queue clear.
  363|  18.1k|    const CBlockIndex* best_block_index = m_best_block_index.load();
  364|  18.1k|    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
  Branch (364:9): [True: 81, False: 18.0k]
  365|     81|        LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best "
  366|     81|                  "chain (tip=%s); not writing index locator\n",
  367|     81|                  __func__, locator_tip_hash.ToString(),
  368|     81|                  best_block_index->GetBlockHash().ToString());
  369|     81|        return;
  370|     81|    }
  371|       |
  372|       |    // No need to handle errors in Commit. If it fails, the error will be already be logged. The
  373|       |    // best way to recover is to continue, as index cannot be corrupted by a missed commit to disk
  374|       |    // for an advanced index state.
  375|  18.0k|    Commit();
  376|  18.0k|}
  377|       |
  378|       |bool BaseIndex::BlockUntilSyncedToCurrentChain() const
  379|      0|{
  380|      0|    AssertLockNotHeld(cs_main);
  381|       |
  382|      0|    if (!m_synced) {
  Branch (382:9): [True: 0, False: 0]
  383|      0|        return false;
  384|      0|    }
  385|       |
  386|      0|    {
  387|       |        // Skip the queue-draining stuff if we know we're caught up with
  388|       |        // m_chain.Tip().
  389|      0|        LOCK(cs_main);
  390|      0|        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
  391|      0|        const CBlockIndex* best_block_index = m_best_block_index.load();
  392|      0|        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
  Branch (392:13): [True: 0, False: 0]
  393|      0|            return true;
  394|      0|        }
  395|      0|    }
  396|       |
  397|      0|    LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
  398|      0|    m_chain->context()->validation_signals->SyncWithValidationInterfaceQueue();
  399|      0|    return true;
  400|      0|}
  401|       |
  402|       |void BaseIndex::Interrupt()
  403|  22.1k|{
  404|  22.1k|    m_interrupt();
  405|  22.1k|}
  406|       |
  407|       |bool BaseIndex::StartBackgroundSync()
  408|  11.0k|{
  409|  11.0k|    if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");
  Branch (409:9): [True: 0, False: 11.0k]
  410|       |
  411|  11.0k|    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); });
  412|  11.0k|    return true;
  413|  11.0k|}
  414|       |
  415|       |void BaseIndex::Stop()
  416|  22.1k|{
  417|  22.1k|    if (m_chain->context()->validation_signals) {
  Branch (417:9): [True: 22.1k, False: 0]
  418|  22.1k|        m_chain->context()->validation_signals->UnregisterValidationInterface(this);
  419|  22.1k|    }
  420|       |
  421|  22.1k|    if (m_thread_sync.joinable()) {
  Branch (421:9): [True: 11.0k, False: 11.0k]
  422|  11.0k|        m_thread_sync.join();
  423|  11.0k|    }
  424|  22.1k|}
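
Lines 402-424 are the shutdown half of the index lifecycle that the destructor (lines 78-82) relies on: Interrupt() first, then Stop(). Below is a hedged usage sketch, not part of the measured source: it reuses the hypothetical HeightIndex subclass from the sketch after Init() above and assumes Init() and StartBackgroundSync() are driven by node startup code, as the assertions in this file require.

// Illustrative lifecycle only. Init() must succeed before StartBackgroundSync()
// (which otherwise throws, line 409); shutdown mirrors ~BaseIndex(): Interrupt(), then Stop().
void RunIndexSketch(std::unique_ptr<interfaces::Chain> chain)
{
    HeightIndex index{std::move(chain), /*cache_size=*/1 << 20, /*memory=*/true, /*wipe=*/true};

    if (!index.Init()) return;                 // load best-block locator, register with validation signals
    if (!index.StartBackgroundSync()) return;  // spawn the Sync() thread

    // ... node runs; BlockConnected()/ChainStateFlushed() notifications keep the index current ...

    index.Interrupt();                         // ask the sync loop to exit at its next iteration
    index.Stop();                              // unregister from validation signals and join the thread
}
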
  425|       |
  426|       |IndexSummary BaseIndex::GetSummary() const
  427|  11.0k|{
  428|  11.0k|    IndexSummary summary{};
  429|  11.0k|    summary.name = GetName();
  430|  11.0k|    summary.synced = m_synced;
  431|  11.0k|    if (const auto& pindex = m_best_block_index.load()) {
  Branch (431:21): [True: 14, False: 11.0k]
  432|     14|        summary.best_block_height = pindex->nHeight;
  433|     14|        summary.best_block_hash = pindex->GetBlockHash();
  434|  11.0k|    } else {
  435|  11.0k|        summary.best_block_height = 0;
  436|  11.0k|        summary.best_block_hash = m_chain->getBlockHash(0);
  437|  11.0k|    }
  438|  11.0k|    return summary;
  439|  11.0k|}
  440|       |
  441|       |void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
  442|  2.25M|{
  443|  2.25M|    assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());
  Branch (443:5): [True: 2.25M, False: 0]
  Branch (443:5): [True: 0, False: 0]
  Branch (443:5): [True: 2.25M, False: 0]
  444|       |
  445|  2.25M|    if (AllowPrune() && block) {
  Branch (445:9): [True: 2.25M, False: 0]
  Branch (445:25): [True: 2.23M, False: 11.0k]
  446|  2.23M|        node::PruneLockInfo prune_lock;
  447|  2.23M|        prune_lock.height_first = block->nHeight;
  448|  2.23M|        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
  449|  2.23M|    }
  450|       |
  451|       |    // Intentionally set m_best_block_index as the last step in this function,
  452|       |    // after updating prune locks above, and after making any other references
  453|       |    // to *this, so the BlockUntilSyncedToCurrentChain function (which checks
  454|       |    // m_best_block_index as an optimization) can be used to wait for the last
  455|       |    // BlockConnected notification and safely assume that prune locks are
  456|       |    // updated and that the index object is safe to delete.
  457|  2.25M|    m_best_block_index = block;
  458|  2.25M|}