// Copyright (c) 2017-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <chainparams.h>
#include <common/args.h>
#include <index/base.h>
#include <interfaces/chain.h>
#include <kernel/chain.h>
#include <logging.h>
#include <node/abort.h>
#include <node/blockstorage.h>
#include <node/context.h>
#include <node/database_args.h>
#include <node/interface_ui.h>
#include <shutdown.h>
#include <tinyformat.h>
#include <util/thread.h>
#include <util/translation.h>
#include <validation.h> // For g_chainman
#include <warnings.h>

#include <string>
#include <utility>

constexpr uint8_t DB_BEST_BLOCK{'B'};

constexpr auto SYNC_LOG_INTERVAL{30s};
constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};

template <typename... Args>
void BaseIndex::FatalErrorf(const char* fmt, const Args&... args)
{
    auto message = tfm::format(fmt, args...);
    node::AbortNode(m_chain->context()->exit_status, message);
}

CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
{
    CBlockLocator locator;
    bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator));
    assert(found);
    assert(!locator.IsNull());
    return locator;
}

BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
    CDBWrapper{DBParams{
        .path = path,
        .cache_bytes = n_cache_size,
        .memory_only = f_memory,
        .wipe_data = f_wipe,
        .obfuscate = f_obfuscate,
        .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
{}

bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
{
    bool success = Read(DB_BEST_BLOCK, locator);
    if (!success) {
        locator.SetNull();
    }
    return success;
}

void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
{
    batch.Write(DB_BEST_BLOCK, locator);
}

BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
    : m_chain{std::move(chain)}, m_name{std::move(name)} {}

BaseIndex::~BaseIndex()
{
    Interrupt();
    Stop();
}

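// Initialize the index: load the best-block locator from the index database,
// register for validation notifications, run the subclass's CustomInit, and
// determine whether the index is already caught up with the active chain
// (m_synced). Must succeed before StartBackgroundSync() is called.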
bool BaseIndex::Init()
{
    AssertLockNotHeld(cs_main);

    // May need reset if index is being restarted.
    m_interrupt.reset();

    // The m_chainstate member gives indexing code access to node internals. It
    // will be removed in the follow-up https://github.com/bitcoin/bitcoin/pull/24230
    m_chainstate = WITH_LOCK(::cs_main,
        return &m_chain->context()->chainman->GetChainstateForIndexing());
    // Register to the validation interface before setting the 'm_synced' flag, so that
    // callbacks are not missed once m_synced is true.
    RegisterValidationInterface(this);

    CBlockLocator locator;
    if (!GetDB().ReadBestBlock(locator)) {
        locator.SetNull();
    }

    LOCK(cs_main);
    CChain& index_chain = m_chainstate->m_chain;

    if (locator.IsNull()) {
        SetBestBlockIndex(nullptr);
    } else {
        // Set the best block to the locator's top block. If it is not part of the
        // best chain, we will rewind to the fork point during index sync.
        const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))};
        if (!locator_index) {
            return InitError(strprintf(Untranslated("%s: best block of the index not found. Please rebuild the index."), GetName()));
        }
        SetBestBlockIndex(locator_index);
    }

    // Child init
    const CBlockIndex* start_block = m_best_block_index.load();
    if (!CustomInit(start_block ? std::make_optional(interfaces::BlockKey{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
        return false;
    }

    // Note: this will latch to true immediately if the user starts up with an empty
    // datadir and an index enabled. If this is the case, indexing will happen solely
    // via `BlockConnected` signals until, possibly, the next restart.
    m_synced = start_block == index_chain.Tip();
    m_init = true;
    return true;
}

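// Return the next block for the sync thread to process after pindex_prev.
// If pindex_prev is null, start from the genesis block. If pindex_prev is no
// longer on the active chain (e.g. after a reorg), return the first block past
// the fork point so the caller can rewind the index before continuing.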
static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    AssertLockHeld(cs_main);

    if (!pindex_prev) {
        return chain.Genesis();
    }

    const CBlockIndex* pindex = chain.Next(pindex_prev);
    if (pindex) {
        return pindex;
    }

    return chain.Next(chain.FindFork(pindex_prev));
}

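// Background sync loop: walk the active chain from the index's best block to
// the tip, reading each block from disk and appending it to the index via
// CustomAppend. Progress is logged every SYNC_LOG_INTERVAL and the best-block
// locator is committed every SYNC_LOCATOR_WRITE_INTERVAL, so an interrupted
// sync can resume near where it stopped. Once the tip is reached, m_synced is
// set and further blocks arrive through BlockConnected notifications.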
void BaseIndex::ThreadSync()
{
    const CBlockIndex* pindex = m_best_block_index.load();
    if (!m_synced) {
        std::chrono::steady_clock::time_point last_log_time{0s};
        std::chrono::steady_clock::time_point last_locator_write_time{0s};
        while (true) {
            if (m_interrupt) {
                LogPrintf("%s: m_interrupt set; exiting ThreadSync\n", GetName());

                SetBestBlockIndex(pindex);
                // No need to handle errors in Commit. If it fails, the error will already be
                // logged. The best way to recover is to continue, as the index cannot be
                // corrupted by a missed commit to disk for an advanced index state.
                Commit();
                return;
            }

            {
                LOCK(cs_main);
                const CBlockIndex* pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
                if (!pindex_next) {
                    SetBestBlockIndex(pindex);
                    m_synced = true;
                    // No need to handle errors in Commit. See rationale above.
                    Commit();
                    break;
                }
                if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
                    FatalErrorf("%s: Failed to rewind index %s to a previous chain tip",
                                __func__, GetName());
                    return;
                }
                pindex = pindex_next;
            }

            auto current_time{std::chrono::steady_clock::now()};
            if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
                LogPrintf("Syncing %s with block chain from height %d\n",
                          GetName(), pindex->nHeight);
                last_log_time = current_time;
            }

            if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
                SetBestBlockIndex(pindex->pprev);
                last_locator_write_time = current_time;
                // No need to handle errors in Commit. See rationale above.
                Commit();
            }

            CBlock block;
            interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex);
            if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *pindex)) {
                FatalErrorf("%s: Failed to read block %s from disk",
                            __func__, pindex->GetBlockHash().ToString());
                return;
            } else {
                block_info.data = &block;
            }
            if (!CustomAppend(block_info)) {
                FatalErrorf("%s: Failed to write block %s to index database",
                            __func__, pindex->GetBlockHash().ToString());
                return;
            }
        }
    }

    if (pindex) {
        LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
    } else {
        LogPrintf("%s is enabled\n", GetName());
    }
}

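// Write the current index state to disk: the subclass's pending data
// (CustomCommit) and the best-block locator go into a single CDBBatch, so the
// locator and the indexed data stay consistent on disk.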
bool BaseIndex::Commit()
{
    // Don't commit anything if we haven't indexed any block yet
    // (this could happen if init is interrupted).
    bool ok = m_best_block_index != nullptr;
    if (ok) {
        CDBBatch batch(GetDB());
        ok = CustomCommit(batch);
        if (ok) {
            GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
            ok = GetDB().WriteBatch(batch);
        }
    }
    if (!ok) {
        return error("%s: Failed to commit latest %s state", __func__, GetName());
    }
    return true;
}

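// Unwind the index from current_tip back to new_tip, which must be an ancestor
// of current_tip. Called when the chain the index was following is reorganized
// away; the subclass removes data for the disconnected blocks via CustomRewind
// before the best block index is moved back and committed.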
bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
{
    assert(current_tip == m_best_block_index);
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

    if (!CustomRewind({current_tip->GetBlockHash(), current_tip->nHeight}, {new_tip->GetBlockHash(), new_tip->nHeight})) {
        return false;
    }

    // In the case of a reorg, ensure the persisted block locator is not stale.
    // Pruning has a minimum of 288 blocks-to-keep, so getting the index out of
    // sync beyond that is possible but would be the user's fault.
    // In case we reorg beyond the pruned depth, ReadBlockFromDisk would
    // throw and lead to a graceful shutdown.
    SetBestBlockIndex(new_tip);
    if (!Commit()) {
        // If commit fails, revert the best block index to avoid corruption.
        SetBestBlockIndex(current_tip);
        return false;
    }

    return true;
}

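// CValidationInterface callback: delivered asynchronously (via the validation
// interface queue) for each block connected to a chain tip. Once the initial
// background sync has finished (m_synced), this is the path by which new
// blocks reach the index.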
void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
{
    // Ignore events from the assumed-valid chain; we will process its blocks
    // (sequentially) after it is fully verified by the background chainstate. This
    // is to avoid any out-of-order indexing.
    //
    // TODO at some point we could parameterize whether a particular index can be
    // built out of order, but for now just do the conservative simple thing.
    if (role == ChainstateRole::ASSUMEDVALID) {
        return;
    }

    // Ignore BlockConnected signals until we have fully indexed the chain.
    if (!m_synced) {
        return;
    }

    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (!best_block_index) {
        if (pindex->nHeight != 0) {
            FatalErrorf("%s: First block connected is not the genesis block (height=%d)",
                        __func__, pindex->nHeight);
            return;
        }
    } else {
        // Ensure block connects to an ancestor of the current best block. This should be the case
        // most of the time, but may not be immediately after the sync thread catches up and sets
        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
        // new chain tip. In this unlikely event, log a warning and let the queue clear.
        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
            LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of "
                      "known best chain (tip=%s); not updating index\n",
                      __func__, pindex->GetBlockHash().ToString(),
                      best_block_index->GetBlockHash().ToString());
            return;
        }
        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
            FatalErrorf("%s: Failed to rewind index %s to a previous chain tip",
                        __func__, GetName());
            return;
        }
    }
    interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block.get());
    if (CustomAppend(block_info)) {
        // Setting the best block index is intentionally the last step of this
        // function, so BlockUntilSyncedToCurrentChain callers waiting for the
        // best block index to be updated can rely on the block being fully
        // processed, and the index object being safe to delete.
        SetBestBlockIndex(pindex);
    } else {
        FatalErrorf("%s: Failed to write block %s to index",
                    __func__, pindex->GetBlockHash().ToString());
        return;
    }
}

void BaseIndex::ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator)
{
    // Ignore events from the assumed-valid chain; we will process its blocks
    // (sequentially) after it is fully verified by the background chainstate.
    if (role == ChainstateRole::ASSUMEDVALID) {
        return;
    }

    if (!m_synced) {
        return;
    }

    const uint256& locator_tip_hash = locator.vHave.front();
    const CBlockIndex* locator_tip_index;
    {
        LOCK(cs_main);
        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
    }

    if (!locator_tip_index) {
        FatalErrorf("%s: First block (hash=%s) in locator was not found",
                    __func__, locator_tip_hash.ToString());
        return;
    }

    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
    // immediately after the sync thread catches up and sets m_synced. Consider the case where
    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
    // event, log a warning and let the queue clear.
    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
        LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best "
                  "chain (tip=%s); not writing index locator\n",
                  __func__, locator_tip_hash.ToString(),
                  best_block_index->GetBlockHash().ToString());
        return;
    }

    // No need to handle errors in Commit. If it fails, the error will already be logged. The
    // best way to recover is to continue, as the index cannot be corrupted by a missed commit
    // to disk for an advanced index state.
    Commit();
}

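// Block until the index has processed all BlockConnected notifications up to
// the current chain tip, then return true; return false if the initial sync is
// still running. Callers that are about to read from the index typically call
// this first so results reflect the current tip. A minimal sketch (assuming a
// hypothetical `g_my_index` pointer to some BaseIndex subclass):
//
//     if (g_my_index && !g_my_index->BlockUntilSyncedToCurrentChain()) {
//         // index is still doing its initial sync; results may lag the tip
//     }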
bool BaseIndex::BlockUntilSyncedToCurrentChain() const
{
    AssertLockNotHeld(cs_main);

    if (!m_synced) {
        return false;
    }

    {
        // Skip the queue-draining stuff if we know we're caught up with
        // m_chain.Tip().
        LOCK(cs_main);
        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
        const CBlockIndex* best_block_index = m_best_block_index.load();
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
            return true;
        }
    }

    LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
    SyncWithValidationInterfaceQueue();
    return true;
}

void BaseIndex::Interrupt()
{
    m_interrupt();
}

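// Start the background sync thread. Init() must have been called first, or a
// std::logic_error is thrown. The usual lifecycle, sketched here with a
// hypothetical subclass name for illustration only:
//
//     auto index = std::make_unique<MyIndex>(std::move(chain), /*...*/);
//     if (!index->Init()) { /* handle error */ }
//     index->StartBackgroundSync();   // spawns ThreadSync
//     // ... later, on shutdown:
//     index->Interrupt();             // ask ThreadSync to stop
//     index->Stop();                  // unregister callbacks and join the thread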
bool BaseIndex::StartBackgroundSync()
{
    if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");

    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); });
    return true;
}

void BaseIndex::Stop()
{
    UnregisterValidationInterface(this);

    if (m_thread_sync.joinable()) {
        m_thread_sync.join();
    }
}

IndexSummary BaseIndex::GetSummary() const
{
    IndexSummary summary{};
    summary.name = GetName();
    summary.synced = m_synced;
    if (const auto& pindex = m_best_block_index.load()) {
        summary.best_block_height = pindex->nHeight;
        summary.best_block_hash = pindex->GetBlockHash();
    } else {
        summary.best_block_height = 0;
        summary.best_block_hash = m_chain->getBlockHash(0);
    }
    return summary;
}

void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
{
    assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());

    if (AllowPrune() && block) {
        node::PruneLockInfo prune_lock;
        prune_lock.height_first = block->nHeight;
        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
    }

    // Intentionally set m_best_block_index as the last step in this function,
    // after updating prune locks above, and after making any other references
    // to *this, so the BlockUntilSyncedToCurrentChain function (which checks
    // m_best_block_index as an optimization) can be used to wait for the last
    // BlockConnected notification and safely assume that prune locks are
    // updated and that the index object is safe to delete.
    m_best_block_index = block;
}