Line data Source code
1 : // Copyright (c) 2017-2022 The Bitcoin Core developers
2 : // Distributed under the MIT software license, see the accompanying
3 : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 :
5 : #include <chainparams.h>
6 : #include <common/args.h>
7 : #include <index/base.h>
8 : #include <interfaces/chain.h>
9 : #include <kernel/chain.h>
10 : #include <logging.h>
11 : #include <node/abort.h>
12 : #include <node/blockstorage.h>
13 : #include <node/context.h>
14 : #include <node/database_args.h>
15 : #include <node/interface_ui.h>
16 : #include <shutdown.h>
17 2 : #include <tinyformat.h>
18 2 : #include <util/thread.h>
19 : #include <util/translation.h>
20 : #include <validation.h> // For g_chainman
21 : #include <warnings.h>
22 :
23 : #include <string>
24 : #include <utility>
25 :
26 : constexpr uint8_t DB_BEST_BLOCK{'B'};
27 :
28 : constexpr auto SYNC_LOG_INTERVAL{30s};
29 : constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s};
30 :
template <typename... Args>
// Format an error message and abort the node. Used when the index reaches a
// state it cannot recover from (e.g. failed disk read or rewind), since a
// stale index would silently serve wrong data.
void BaseIndex::FatalErrorf(const char* fmt, const Args&... args)
{
    auto message = tfm::format(fmt, args...);
    node::AbortNode(m_chain->context()->exit_status, message);
}
37 :
38 0 : CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
39 : {
40 0 : CBlockLocator locator;
41 0 : bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator));
42 0 : assert(found);
43 0 : assert(!locator.IsNull());
44 0 : return locator;
45 0 : }
46 :
// Open (or create) the index database at `path`.
// n_cache_size: leveldb cache size in bytes.
// f_memory:     keep the database purely in memory (used in tests).
// f_wipe:       discard any existing data on open.
// f_obfuscate:  XOR-obfuscate values stored on disk.
BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) :
    CDBWrapper{DBParams{
        .path = path,
        .cache_bytes = n_cache_size,
        .memory_only = f_memory,
        .wipe_data = f_wipe,
        .obfuscate = f_obfuscate,
        // Pick up database-related command-line options from gArgs.
        .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}}
{}
56 :
57 0 : bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const
58 : {
59 0 : bool success = Read(DB_BEST_BLOCK, locator);
60 0 : if (!success) {
61 0 : locator.SetNull();
62 0 : }
63 0 : return success;
64 : }
65 :
// Queue a write of the best-block locator into `batch`. The caller commits
// the batch, so the locator lands atomically with the index data it covers.
void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator)
{
    batch.Write(DB_BEST_BLOCK, locator);
}
70 0 :
// Take ownership of the chain interface and record the index name (used for
// logging, prune locks and the sync thread's name).
BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name)
    : m_chain{std::move(chain)}, m_name{std::move(name)} {}
73 :
BaseIndex::~BaseIndex()
{
    // Signal the sync thread to exit before joining it in Stop(); in the
    // other order Stop() could block forever on a thread that never returns.
    Interrupt();
    Stop();
}
79 :
// Initialize the index: restore the best-block state from the database,
// register for validation notifications, and run the subclass's CustomInit.
// Returns false (after logging an InitError) on failure. Must be called
// before StartBackgroundSync.
bool BaseIndex::Init()
{
    // m_chainstate member gives indexing code access to node internals. It is
    // removed in followup https://github.com/bitcoin/bitcoin/pull/24230
    m_chainstate = &m_chain->context()->chainman->ActiveChainstate();
    // Register to validation interface before setting the 'm_synced' flag, so that
    // callbacks are not missed once m_synced is true.
    RegisterValidationInterface(this);

    // A missing best-block record means the index is fresh; treat it as a
    // null locator and sync from genesis.
    CBlockLocator locator;
    if (!GetDB().ReadBestBlock(locator)) {
        locator.SetNull();
    }

    LOCK(cs_main);
    CChain& active_chain = m_chainstate->m_chain;
    if (locator.IsNull()) {
        SetBestBlockIndex(nullptr);
    } else {
        // Setting the best block to the locator's top block. If it is not part of the
        // best chain, we will rewind to the fork point during index sync
        const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))};
        if (!locator_index) {
            return InitError(strprintf(Untranslated("%s: best block of the index not found. Please rebuild the index."), GetName()));
        }
        SetBestBlockIndex(locator_index);
    }

    // Child init
    const CBlockIndex* start_block = m_best_block_index.load();
    if (!CustomInit(start_block ? std::make_optional(interfaces::BlockKey{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) {
        return false;
    }

    // Note: this will latch to true immediately if the user starts up with an empty
    // datadir and an index enabled. If this is the case, indexation will happen solely
    // via `BlockConnected` signals until, possibly, the next restart.
    m_synced = start_block == active_chain.Tip();
    m_init = true;
    return true;
}
121 :
122 0 : static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
123 : {
124 0 : AssertLockHeld(cs_main);
125 :
126 0 : if (!pindex_prev) {
127 0 : return chain.Genesis();
128 : }
129 :
130 0 : const CBlockIndex* pindex = chain.Next(pindex_prev);
131 0 : if (pindex) {
132 0 : return pindex;
133 : }
134 :
135 0 : return chain.Next(chain.FindFork(pindex_prev));
136 0 : }
137 :
// Background sync loop: walk the active chain from the current best block,
// appending each block to the index until the tip is reached (then latch
// m_synced) or m_interrupt is signalled. Progress is logged and the locator
// persisted on fixed intervals (SYNC_LOG_INTERVAL / SYNC_LOCATOR_WRITE_INTERVAL).
void BaseIndex::ThreadSync()
{
    const CBlockIndex* pindex = m_best_block_index.load();
    if (!m_synced) {
        std::chrono::steady_clock::time_point last_log_time{0s};
        std::chrono::steady_clock::time_point last_locator_write_time{0s};
        while (true) {
            if (m_interrupt) {
                SetBestBlockIndex(pindex);
                // No need to handle errors in Commit. If it fails, the error will be already be
                // logged. The best way to recover is to continue, as index cannot be corrupted by
                // a missed commit to disk for an advanced index state.
                Commit();
                return;
            }

            // Advance to the next block while holding cs_main; release the
            // lock before the disk read and index write below.
            {
                LOCK(cs_main);
                const CBlockIndex* pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
                if (!pindex_next) {
                    SetBestBlockIndex(pindex);
                    m_synced = true;
                    // No need to handle errors in Commit. See rationale above.
                    Commit();
                    break;
                }
                // A reorg happened since the last step: rewind the index to
                // the fork point before continuing on the new branch.
                if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) {
                    FatalErrorf("%s: Failed to rewind index %s to a previous chain tip",
                               __func__, GetName());
                    return;
                }
                pindex = pindex_next;
            }

            auto current_time{std::chrono::steady_clock::now()};
            if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
                LogPrintf("Syncing %s with block chain from height %d\n",
                          GetName(), pindex->nHeight);
                last_log_time = current_time;
            }

            // Persist the parent as best block: pindex itself has not been
            // appended yet at this point.
            if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
                SetBestBlockIndex(pindex->pprev);
                last_locator_write_time = current_time;
                // No need to handle errors in Commit. See rationale above.
                Commit();
            }

            CBlock block;
            interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex);
            if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *pindex)) {
                FatalErrorf("%s: Failed to read block %s from disk",
                           __func__, pindex->GetBlockHash().ToString());
                return;
            } else {
                block_info.data = &block;
            }
            if (!CustomAppend(block_info)) {
                FatalErrorf("%s: Failed to write block %s to index database",
                           __func__, pindex->GetBlockHash().ToString());
                return;
            }
        }
    }

    if (pindex) {
        LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
    } else {
        LogPrintf("%s is enabled\n", GetName());
    }
}
209 :
210 0 : bool BaseIndex::Commit()
211 : {
212 : // Don't commit anything if we haven't indexed any block yet
213 : // (this could happen if init is interrupted).
214 0 : bool ok = m_best_block_index != nullptr;
215 0 : if (ok) {
216 0 : CDBBatch batch(GetDB());
217 0 : ok = CustomCommit(batch);
218 0 : if (ok) {
219 0 : GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
220 0 : ok = GetDB().WriteBatch(batch);
221 0 : }
222 0 : }
223 0 : if (!ok) {
224 0 : return error("%s: Failed to commit latest %s state", __func__, GetName());
225 : }
226 0 : return true;
227 0 : }
228 :
// Roll the index back from `current_tip` to its ancestor `new_tip` (a reorg).
// Delegates the actual data removal to CustomRewind, then persists the new
// best block. Returns false if either step fails, reverting the in-memory
// best block on a failed commit.
bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
{
    // Preconditions: we can only rewind from the current best block, and only
    // to one of its ancestors.
    assert(current_tip == m_best_block_index);
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

    if (!CustomRewind({current_tip->GetBlockHash(), current_tip->nHeight}, {new_tip->GetBlockHash(), new_tip->nHeight})) {
        return false;
    }

    // In the case of a reorg, ensure persisted block locator is not stale.
    // Pruning has a minimum of 288 blocks-to-keep and getting the index
    // out of sync may be possible but a users fault.
    // In case we reorg beyond the pruned depth, ReadBlockFromDisk would
    // throw and lead to a graceful shutdown
    SetBestBlockIndex(new_tip);
    if (!Commit()) {
        // If commit fails, revert the best block index to avoid corruption.
        SetBestBlockIndex(current_tip);
        return false;
    }

    return true;
}
252 :
// Validation-interface callback: append a newly connected block to the index.
// Ignored until the initial sync has finished (ThreadSync covers that range).
// Handles reorgs by rewinding to the fork point before appending.
void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex)
{
    if (!m_synced) {
        return;
    }

    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (!best_block_index) {
        // Empty index: the first block we accept must be genesis (height 0).
        if (pindex->nHeight != 0) {
            FatalErrorf("%s: First block connected is not the genesis block (height=%d)",
                       __func__, pindex->nHeight);
            return;
        }
    } else {
        // Ensure block connects to an ancestor of the current best block. This should be the case
        // most of the time, but may not be immediately after the sync thread catches up and sets
        // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are
        // in the ValidationInterface queue backlog even after the sync thread has caught up to the
        // new chain tip. In this unlikely event, log a warning and let the queue clear.
        if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) {
            LogPrintf("%s: WARNING: Block %s does not connect to an ancestor of "
                      "known best chain (tip=%s); not updating index\n",
                      __func__, pindex->GetBlockHash().ToString(),
                      best_block_index->GetBlockHash().ToString());
            return;
        }
        // The new block attaches below our best block: a reorg. Rewind the
        // index to the new block's parent before appending it.
        if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) {
            FatalErrorf("%s: Failed to rewind index %s to a previous chain tip",
                       __func__, GetName());
            return;
        }
    }
    interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block.get());
    if (CustomAppend(block_info)) {
        // Setting the best block index is intentionally the last step of this
        // function, so BlockUntilSyncedToCurrentChain callers waiting for the
        // best block index to be updated can rely on the block being fully
        // processed, and the index object being safe to delete.
        SetBestBlockIndex(pindex);
    } else {
        FatalErrorf("%s: Failed to write block %s to index",
                   __func__, pindex->GetBlockHash().ToString());
        return;
    }
}
298 :
// Validation-interface callback fired after the chainstate is flushed to
// disk: persist the index state so it never gets ahead of the node's own
// on-disk chain state. Ignored until the initial sync has finished.
void BaseIndex::ChainStateFlushed(const CBlockLocator& locator)
{
    if (!m_synced) {
        return;
    }

    const uint256& locator_tip_hash = locator.vHave.front();
    const CBlockIndex* locator_tip_index;
    {
        LOCK(cs_main);
        locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash);
    }

    if (!locator_tip_index) {
        FatalErrorf("%s: First block (hash=%s) in locator was not found",
                   __func__, locator_tip_hash.ToString());
        return;
    }

    // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail
    // immediately after the sync thread catches up and sets m_synced. Consider the case where
    // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue
    // backlog even after the sync thread has caught up to the new chain tip. In this unlikely
    // event, log a warning and let the queue clear.
    const CBlockIndex* best_block_index = m_best_block_index.load();
    if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) {
        LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known best "
                  "chain (tip=%s); not writing index locator\n",
                  __func__, locator_tip_hash.ToString(),
                  best_block_index->GetBlockHash().ToString());
        return;
    }

    // No need to handle errors in Commit. If it fails, the error will be already be logged. The
    // best way to recover is to continue, as index cannot be corrupted by a missed commit to disk
    // for an advanced index state.
    Commit();
}
337 :
// Block until the index has processed notifications up to the current chain
// tip. Returns false if the initial sync is still in progress (the caller
// should not wait on it), true once the index is caught up.
bool BaseIndex::BlockUntilSyncedToCurrentChain() const
{
    AssertLockNotHeld(cs_main);

    if (!m_synced) {
        return false;
    }

    {
        // Skip the queue-draining stuff if we know we're caught up with
        // m_chain.Tip().
        LOCK(cs_main);
        const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip();
        const CBlockIndex* best_block_index = m_best_block_index.load();
        if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
            return true;
        }
    }

    // Not caught up yet: drain the validation-interface queue so pending
    // BlockConnected callbacks are delivered before returning.
    LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName());
    SyncWithValidationInterfaceQueue();
    return true;
}
361 :
// Request the background sync thread to stop at its next loop iteration.
// Safe to call from any thread; does not wait for the thread to exit.
void BaseIndex::Interrupt()
{
    m_interrupt();
}
366 :
// Launch the background sync thread. Init() must have succeeded first;
// calling this on a non-initialized index is a programming error and throws.
bool BaseIndex::StartBackgroundSync()
{
    if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index");

    m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); });
    return true;
}
374 :
// Stop receiving validation notifications and join the sync thread.
// Callers are expected to have called Interrupt() first so the join does
// not block indefinitely (see the destructor).
void BaseIndex::Stop()
{
    UnregisterValidationInterface(this);

    if (m_thread_sync.joinable()) {
        m_thread_sync.join();
    }
}
383 :
384 0 : IndexSummary BaseIndex::GetSummary() const
385 : {
386 0 : IndexSummary summary{};
387 0 : summary.name = GetName();
388 0 : summary.synced = m_synced;
389 0 : if (const auto& pindex = m_best_block_index.load()) {
390 0 : summary.best_block_height = pindex->nHeight;
391 0 : summary.best_block_hash = pindex->GetBlockHash();
392 0 : } else {
393 0 : summary.best_block_height = 0;
394 0 : summary.best_block_hash = m_chain->getBlockHash(0);
395 : }
396 0 : return summary;
397 0 : }
398 :
// Update the in-memory best block pointer and, for prunable indexes, bump
// the prune lock so the blocks this index still needs are kept on disk.
void BaseIndex::SetBestBlockIndex(const CBlockIndex* block)
{
    // An index that cannot tolerate pruning must never run on a pruned node.
    assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune());

    if (AllowPrune() && block) {
        node::PruneLockInfo prune_lock;
        prune_lock.height_first = block->nHeight;
        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
    }

    // Intentionally set m_best_block_index as the last step in this function,
    // after updating prune locks above, and after making any other references
    // to *this, so the BlockUntilSyncedToCurrentChain function (which checks
    // m_best_block_index as an optimization) can be used to wait for the last
    // BlockConnected notification and safely assume that prune locks are
    // updated and that the index object is safe to delete.
    m_best_block_index = block;
}
|