Line data Source code
1 : // Copyright (c) 2011-2022 The Bitcoin Core developers
2 : // Distributed under the MIT software license, see the accompanying
3 : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4 :
5 : #include <node/blockstorage.h>
6 :
7 : #include <chain.h>
8 : #include <clientversion.h>
9 : #include <consensus/validation.h>
10 : #include <dbwrapper.h>
11 : #include <flatfile.h>
12 : #include <hash.h>
13 : #include <kernel/chainparams.h>
14 : #include <kernel/messagestartchars.h>
15 : #include <logging.h>
16 : #include <pow.h>
17 2 : #include <reverse_iterator.h>
18 2 : #include <signet.h>
19 : #include <streams.h>
20 : #include <sync.h>
21 : #include <undo.h>
22 : #include <util/batchpriority.h>
23 : #include <util/fs.h>
24 : #include <util/signalinterrupt.h>
25 : #include <util/strencodings.h>
26 : #include <util/translation.h>
27 : #include <validation.h>
28 :
29 : #include <map>
30 : #include <unordered_map>
31 :
32 : namespace kernel {
33 : static constexpr uint8_t DB_BLOCK_FILES{'f'};
34 : static constexpr uint8_t DB_BLOCK_INDEX{'b'};
35 : static constexpr uint8_t DB_FLAG{'F'};
36 : static constexpr uint8_t DB_REINDEX_FLAG{'R'};
37 : static constexpr uint8_t DB_LAST_BLOCK{'l'};
38 : // Keys used in previous version that might still be found in the DB:
39 : // BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
40 : // BlockTreeDB::DB_TXINDEX{'t'}
41 : // BlockTreeDB::ReadFlag("txindex")
42 :
43 2 : bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
44 : {
45 2 : return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
46 : }
47 :
48 0 : bool BlockTreeDB::WriteReindexing(bool fReindexing)
49 : {
50 0 : if (fReindexing) {
51 0 : return Write(DB_REINDEX_FLAG, uint8_t{'1'});
52 : } else {
53 0 : return Erase(DB_REINDEX_FLAG);
54 : }
55 0 : }
56 :
57 1 : void BlockTreeDB::ReadReindexing(bool& fReindexing)
58 : {
59 1 : fReindexing = Exists(DB_REINDEX_FLAG);
60 1 : }
61 :
62 1 : bool BlockTreeDB::ReadLastBlockFile(int& nFile)
63 : {
64 1 : return Read(DB_LAST_BLOCK, nFile);
65 : }
66 :
67 0 : bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
68 : {
69 0 : CDBBatch batch(*this);
70 0 : for (const auto& [file, info] : fileInfo) {
71 0 : batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
72 : }
73 0 : batch.Write(DB_LAST_BLOCK, nLastFile);
74 2 : for (const CBlockIndex* bi : blockinfo) {
75 0 : batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
76 : }
77 0 : return WriteBatch(batch, true);
78 0 : }
79 :
80 0 : bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
81 : {
82 0 : return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
83 0 : }
84 :
85 1 : bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
86 : {
87 : uint8_t ch;
88 1 : if (!Read(std::make_pair(DB_FLAG, name), ch)) {
89 1 : return false;
90 : }
91 0 : fValue = ch == uint8_t{'1'};
92 0 : return true;
93 1 : }
94 :
// Walk every DB_BLOCK_INDEX record in the database and materialize an
// in-memory CBlockIndex for each via the caller-supplied insertBlockIndex
// callback (which also resolves/creates the parent entry for hashPrev).
// Validates each header's proof of work before continuing.
// Returns false on interrupt, on a value deserialization failure, or on a
// failed PoW check; returns true once iteration leaves the DB_BLOCK_INDEX
// key range (or the cursor is exhausted).
bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
{
    AssertLockHeld(::cs_main);
    std::unique_ptr<CDBIterator> pcursor(NewIterator());
    // Seek to the first block-index record; keys are (prefix, block hash).
    pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));

    // Load m_block_index
    while (pcursor->Valid()) {
        if (interrupt) return false;
        std::pair<uint8_t, uint256> key;
        if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
            CDiskBlockIndex diskindex;
            if (pcursor->GetValue(diskindex)) {
                // Construct block index object
                CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
                // Link to parent (insertBlockIndex creates a placeholder if
                // the parent hasn't been seen yet).
                pindexNew->pprev = insertBlockIndex(diskindex.hashPrev);
                pindexNew->nHeight = diskindex.nHeight;
                pindexNew->nFile = diskindex.nFile;
                pindexNew->nDataPos = diskindex.nDataPos;
                pindexNew->nUndoPos = diskindex.nUndoPos;
                pindexNew->nVersion = diskindex.nVersion;
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
                pindexNew->nTime = diskindex.nTime;
                pindexNew->nBits = diskindex.nBits;
                pindexNew->nNonce = diskindex.nNonce;
                pindexNew->nStatus = diskindex.nStatus;
                pindexNew->nTx = diskindex.nTx;

                // Sanity check: refuse to load headers that fail PoW.
                if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
                    return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString());
                }

                pcursor->Next();
            } else {
                return error("%s: failed to read value", __func__);
            }
        } else {
            // First key outside the DB_BLOCK_INDEX prefix: we're done.
            break;
        }
    }

    return true;
}
138 : } // namespace kernel
139 :
140 : namespace node {
141 : std::atomic_bool fReindex(false);
142 :
143 184104 : bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
144 : {
145 : // First sort by most total work, ...
146 184104 : if (pa->nChainWork > pb->nChainWork) return false;
147 123004 : if (pa->nChainWork < pb->nChainWork) return true;
148 :
149 : // ... then by earliest time received, ...
150 2404 : if (pa->nSequenceId < pb->nSequenceId) return false;
151 2404 : if (pa->nSequenceId > pb->nSequenceId) return true;
152 :
153 : // Use pointer address as tie breaker (should only happen with blocks
154 : // loaded from disk, as those all have id 0).
155 2404 : if (pa < pb) return false;
156 2404 : if (pa > pb) return true;
157 :
158 : // Identical blocks.
159 2404 : return false;
160 184104 : }
161 :
162 0 : bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
163 : {
164 0 : return pa->nHeight < pb->nHeight;
165 : }
166 :
167 2 : std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
168 : {
169 2 : AssertLockHeld(cs_main);
170 2 : std::vector<CBlockIndex*> rv;
171 2 : rv.reserve(m_block_index.size());
172 2 : for (auto& [_, block_index] : m_block_index) {
173 0 : rv.push_back(&block_index);
174 : }
175 2 : return rv;
176 2 : }
177 :
178 11816 : CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
179 : {
180 11816 : AssertLockHeld(cs_main);
181 11816 : BlockMap::iterator it = m_block_index.find(hash);
182 11816 : return it == m_block_index.end() ? nullptr : &it->second;
183 : }
184 :
185 0 : const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
186 : {
187 0 : AssertLockHeld(cs_main);
188 0 : BlockMap::const_iterator it = m_block_index.find(hash);
189 0 : return it == m_block_index.end() ? nullptr : &it->second;
190 : }
191 :
// Insert a new header into the in-memory block index, wiring it to its
// parent (if known), computing cumulative chain work and nTimeMax, and
// updating best_header when this header has the most work seen so far.
// If the hash is already present, returns the existing entry unchanged.
// The new entry is marked dirty so it gets flushed by WriteBlockIndexDB().
CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
{
    AssertLockHeld(cs_main);

    auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
    if (!inserted) {
        // Duplicate header: hand back the already-indexed entry.
        return &mi->second;
    }
    CBlockIndex* pindexNew = &(*mi).second;

    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;

    // phashBlock points at the map key, which is stable for the map's lifetime.
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
    if (miPrev != m_block_index.end()) {
        pindexNew->pprev = &(*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    // nTimeMax is the max timestamp along this chain; chain work accumulates
    // the parent's work plus this header's proof.
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
        best_header = pindexNew;
    }

    // Queue the new entry for persistence.
    m_dirty_blockindex.insert(pindexNew);

    return pindexNew;
}
225 :
// Mark every block stored in block file `fileNumber` as no longer having
// data/undo on disk, drop them from m_blocks_unlinked, and reset the file's
// info record. Does not delete the file itself — callers queue that via
// setFilesToPrune and UnlinkPrunedFiles().
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
    AssertLockHeld(cs_main);
    LOCK(cs_LastBlockFile);

    // Linear scan: block index entries don't index by file number.
    for (auto& entry : m_block_index) {
        CBlockIndex* pindex = &entry.second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            m_dirty_blockindex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
                // Advance before a possible erase so the iterator stays valid.
                range.first++;
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    m_blockfile_info[fileNumber].SetNull();
    m_dirty_fileinfo.insert(fileNumber);
}
259 :
// Select block files to delete for a user-initiated prune (pruneblockchain
// RPC). Prunes every file whose highest block is at or below the requested
// height, while always keeping at least MIN_BLOCKS_TO_KEEP blocks below the
// chain tip. Selected file numbers are added to setFilesToPrune.
void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
{
    assert(IsPruneMode() && nManualPruneHeight > 0);

    LOCK2(cs_main, cs_LastBlockFile);
    if (chain_tip_height < 0) {
        // No chain yet: nothing can be pruned.
        return;
    }

    // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
    unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
    int count = 0;
    for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
        // Skip already-empty files and files containing blocks above the limit.
        if (m_blockfile_info[fileNumber].nSize == 0 || m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
            continue;
        }
        PruneOneBlockFile(fileNumber);
        setFilesToPrune.insert(fileNumber);
        count++;
    }
    LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
}
282 :
283 0 : void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
284 : {
285 0 : LOCK2(cs_main, cs_LastBlockFile);
286 0 : if (chain_tip_height < 0 || GetPruneTarget() == 0) {
287 0 : return;
288 : }
289 0 : if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
290 0 : return;
291 : }
292 :
293 0 : unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
294 0 : uint64_t nCurrentUsage = CalculateCurrentUsage();
295 : // We don't check to prune until after we've allocated new space for files
296 : // So we should leave a buffer under our target to account for another allocation
297 : // before the next pruning.
298 0 : uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
299 : uint64_t nBytesToPrune;
300 0 : int count = 0;
301 :
302 0 : if (nCurrentUsage + nBuffer >= GetPruneTarget()) {
303 : // On a prune event, the chainstate DB is flushed.
304 : // To avoid excessive prune events negating the benefit of high dbcache
305 : // values, we should not prune too rapidly.
306 : // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
307 0 : if (is_ibd) {
308 : // Since this is only relevant during IBD, we use a fixed 10%
309 0 : nBuffer += GetPruneTarget() / 10;
310 0 : }
311 :
312 0 : for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) {
313 0 : nBytesToPrune = m_blockfile_info[fileNumber].nSize + m_blockfile_info[fileNumber].nUndoSize;
314 :
315 0 : if (m_blockfile_info[fileNumber].nSize == 0) {
316 0 : continue;
317 : }
318 :
319 0 : if (nCurrentUsage + nBuffer < GetPruneTarget()) { // are we below our target?
320 0 : break;
321 : }
322 :
323 : // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
324 0 : if (m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
325 0 : continue;
326 : }
327 :
328 0 : PruneOneBlockFile(fileNumber);
329 : // Queue up the files for removal
330 0 : setFilesToPrune.insert(fileNumber);
331 0 : nCurrentUsage -= nBytesToPrune;
332 0 : count++;
333 0 : }
334 0 : }
335 :
336 0 : LogPrint(BCLog::PRUNE, "target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
337 : GetPruneTarget() / 1024 / 1024, nCurrentUsage / 1024 / 1024,
338 : (int64_t(GetPruneTarget()) - int64_t(nCurrentUsage)) / 1024 / 1024,
339 : nLastBlockWeCanPrune, count);
340 0 : }
341 :
342 0 : void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
343 0 : AssertLockHeld(::cs_main);
344 0 : m_prune_locks[name] = lock_info;
345 0 : }
346 :
347 0 : CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
348 : {
349 0 : AssertLockHeld(cs_main);
350 :
351 0 : if (hash.IsNull()) {
352 0 : return nullptr;
353 : }
354 :
355 0 : const auto [mi, inserted]{m_block_index.try_emplace(hash)};
356 0 : CBlockIndex* pindex = &(*mi).second;
357 0 : if (inserted) {
358 0 : pindex->phashBlock = &((*mi).first);
359 0 : }
360 0 : return pindex;
361 0 : }
362 :
// Populate the in-memory block index from the block tree database, then
// derive the fields not stored on disk: cumulative chain work, nTimeMax,
// nChainTx linkage (tracking blocks whose parent data is missing via
// m_blocks_unlinked), failure propagation, and skiplist pointers.
// Returns false on DB failure or interrupt.
bool BlockManager::LoadBlockIndex()
{
    if (!m_block_tree_db->LoadBlockIndexGuts(
            GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
        return false;
    }

    // Calculate nChainWork
    // Height order guarantees each parent is processed before its children.
    std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
    std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
              CBlockIndexHeightOnlyComparator());

    for (CBlockIndex* pindex : vSortedByHeight) {
        if (m_interrupt) return false;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);

        // We can link the chain of blocks for which we've received transactions at some point, or
        // blocks that are assumed-valid on the basis of snapshot load (see
        // PopulateAndValidateSnapshot()).
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx > 0) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    // Parent's chain total unknown: remember this block so it
                    // can be linked once the parent's data arrives.
                    pindex->nChainTx = 0;
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                // Genesis: chain total is just its own tx count.
                pindex->nChainTx = pindex->nTx;
            }
        }
        // A child of a failed block is itself failed.
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            m_dirty_blockindex.insert(pindex);
        }
        if (pindex->pprev) {
            pindex->BuildSkip();
        }
    }

    return true;
}
407 :
408 0 : bool BlockManager::WriteBlockIndexDB()
409 : {
410 0 : AssertLockHeld(::cs_main);
411 0 : std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
412 0 : vFiles.reserve(m_dirty_fileinfo.size());
413 0 : for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
414 0 : vFiles.push_back(std::make_pair(*it, &m_blockfile_info[*it]));
415 0 : m_dirty_fileinfo.erase(it++);
416 : }
417 0 : std::vector<const CBlockIndex*> vBlocks;
418 0 : vBlocks.reserve(m_dirty_blockindex.size());
419 0 : for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
420 0 : vBlocks.push_back(*it);
421 0 : m_dirty_blockindex.erase(it++);
422 : }
423 0 : if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) {
424 0 : return false;
425 : }
426 0 : return true;
427 0 : }
428 :
// Load the full block index plus block-file metadata from the database,
// verify that every blk file referenced by an indexed block can be opened,
// and restore the pruned/reindexing state flags.
// Returns false if the index fails to load or a required blk file is missing.
bool BlockManager::LoadBlockIndexDB()
{
    if (!LoadBlockIndex()) {
        return false;
    }

    // Load block file info
    m_block_tree_db->ReadLastBlockFile(m_last_blockfile);
    m_blockfile_info.resize(m_last_blockfile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile);
    for (int nFile = 0; nFile <= m_last_blockfile; nFile++) {
        m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[m_last_blockfile].ToString());
    // Pick up any file records beyond the recorded last file (can exist if a
    // prior shutdown happened between file rollover and metadata flush).
    for (int nFile = m_last_blockfile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
            m_blockfile_info.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const auto& [_, block_index] : m_block_index) {
        if (block_index.nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(block_index.nFile);
        }
    }
    // Fail startup if any referenced blk file cannot be opened.
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
        FlatFilePos pos(*it, 0);
        if (AutoFile{OpenBlockFile(pos, true)}.IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
    if (m_have_pruned) {
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
    }

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    m_block_tree_db->ReadReindexing(fReindexing);
    if (fReindexing) fReindex = true;

    return true;
}
480 :
481 1 : void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
482 : {
483 1 : AssertLockHeld(::cs_main);
484 1 : if (!m_have_pruned) {
485 1 : return;
486 : }
487 :
488 0 : std::set<int> block_files_to_prune;
489 0 : for (int file_number = 0; file_number < m_last_blockfile; file_number++) {
490 0 : if (m_blockfile_info[file_number].nSize == 0) {
491 0 : block_files_to_prune.insert(file_number);
492 0 : }
493 0 : }
494 :
495 0 : UnlinkPrunedFiles(block_files_to_prune);
496 1 : }
497 :
498 5259 : const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
499 : {
500 5259 : const MapCheckpoints& checkpoints = data.mapCheckpoints;
501 :
502 5259 : for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) {
503 5259 : const uint256& hash = i.second;
504 5259 : const CBlockIndex* pindex = LookupBlockIndex(hash);
505 5259 : if (pindex) {
506 5259 : return pindex;
507 : }
508 : }
509 0 : return nullptr;
510 5259 : }
511 :
512 0 : bool BlockManager::IsBlockPruned(const CBlockIndex* pblockindex)
513 : {
514 0 : AssertLockHeld(::cs_main);
515 0 : return (m_have_pruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
516 : }
517 :
// Walk backwards from upper_block (which must have data) and return the
// earliest ancestor that still has block data on disk. If lower_block is
// given, stop and return it when reached; asserts that lower_block lies on
// upper_block's chain (height never surpassed while walking).
const CBlockIndex* BlockManager::GetFirstStoredBlock(const CBlockIndex& upper_block, const CBlockIndex* lower_block)
{
    AssertLockHeld(::cs_main);
    const CBlockIndex* last_block = &upper_block;
    assert(last_block->nStatus & BLOCK_HAVE_DATA); // 'upper_block' must have data
    while (last_block->pprev && (last_block->pprev->nStatus & BLOCK_HAVE_DATA)) {
        if (lower_block) {
            // Return if we reached the lower_block
            if (last_block == lower_block) return lower_block;
            // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain
            // and so far this is not allowed.
            assert(last_block->nHeight >= lower_block->nHeight);
        }
        last_block = last_block->pprev;
    }
    assert(last_block != nullptr);
    return last_block;
}
536 :
537 0 : bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
538 : {
539 0 : if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
540 0 : return GetFirstStoredBlock(upper_block, &lower_block) == &lower_block;
541 0 : }
542 :
543 : // If we're using -prune with -reindex, then delete block files that will be ignored by the
544 : // reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
545 : // is missing, do the same here to delete any later block files after a gap. Also delete all
546 : // rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
547 : // is in sync with what's actually on disk by the time we start downloading, so that pruning
548 : // works correctly.
// If we're using -prune with -reindex, then delete block files that will be ignored by the
// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
// is missing, do the same here to delete any later block files after a gap. Also delete all
// rev files since they'll be rewritten by the reindex anyway. This ensures that m_blockfile_info
// is in sync with what's actually on disk by the time we start downloading, so that pruning
// works correctly.
void BlockManager::CleanupBlockRevFiles() const
{
    std::map<std::string, fs::path> mapBlockFiles;

    // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
    // Remove the rev files immediately and insert the blk file paths into an
    // ordered map keyed by block file index.
    LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
    for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
        const std::string path = fs::PathToString(it->path().filename());
        // Expect exactly "blkNNNNN.dat" / "revNNNNN.dat" (12 chars).
        if (fs::is_regular_file(*it) &&
            path.length() == 12 &&
            path.substr(8,4) == ".dat")
        {
            if (path.substr(0, 3) == "blk") {
                // Key by the 5-digit index so the map iterates in file order.
                mapBlockFiles[path.substr(3, 5)] = it->path();
            } else if (path.substr(0, 3) == "rev") {
                remove(it->path());
            }
        }
    }

    // Remove all block files that aren't part of a contiguous set starting at
    // zero by walking the ordered map (keys are block file indices) by
    // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
    // start removing block files.
    int nContigCounter = 0;
    for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
        if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
            nContigCounter++;
            continue;
        }
        remove(item.second);
    }
}
584 :
585 0 : CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
586 : {
587 0 : LOCK(cs_LastBlockFile);
588 :
589 0 : return &m_blockfile_info.at(n);
590 0 : }
591 :
// Append serialized undo data to the rev file at `pos`, preceded by the
// network magic + payload size header and followed by a checksum of
// (block hash, undo data). On success, pos.nPos is updated to point at the
// start of the undo payload (after the header), which is what gets stored
// in the block index.
bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const
{
    // Open history file to append
    AutoFile fileout{OpenUndoFile(pos)};
    if (fileout.IsNull()) {
        return error("%s: OpenUndoFile failed", __func__);
    }

    // Write index header
    unsigned int nSize = GetSerializeSize(blockundo, CLIENT_VERSION);
    fileout << GetParams().MessageStart() << nSize;

    // Write undo data
    // ftell AFTER the header so pos.nPos addresses the payload itself.
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0) {
        return error("%s: ftell failed", __func__);
    }
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    // Hash covers the owning block's hash too, binding undo data to its block.
    HashWriter hasher{};
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}
620 :
// Read and checksum-verify the undo data for `index` from its rev file.
// Returns false (via error()) if the block has no undo position recorded,
// the file can't be opened, deserialization fails, or the stored checksum
// doesn't match the recomputed one.
bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const
{
    const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};

    if (pos.IsNull()) {
        return error("%s: no undo data available", __func__);
    }

    // Open history file to read
    AutoFile filein{OpenUndoFile(pos, true)};
    if (filein.IsNull()) {
        return error("%s: OpenUndoFile failed", __func__);
    }

    // Read block
    uint256 hashChecksum;
    HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
    try {
        // The checksum covers the PARENT-relative block hash first, matching
        // UndoWriteToDisk's hasher << hashBlock ordering.
        verifier << index.pprev->GetBlockHash();
        verifier >> blockundo;
        filein >> hashChecksum;
    } catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    if (hashChecksum != verifier.GetHash()) {
        return error("%s: Checksum mismatch", __func__);
    }

    return true;
}
653 :
654 0 : void BlockManager::FlushUndoFile(int block_file, bool finalize)
655 : {
656 0 : FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
657 0 : if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
658 0 : m_opts.notifications.flushError("Flushing undo file to disk failed. This is likely the result of an I/O error.");
659 0 : }
660 0 : }
661 :
// Flush the current (last) block file to disk, optionally finalizing it
// (truncating to its used size), and conditionally flush its paired undo
// file. Safe to call before any block files have been loaded.
void BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo)
{
    LOCK(cs_LastBlockFile);

    if (m_blockfile_info.size() < 1) {
        // Return if we haven't loaded any blockfiles yet. This happens during
        // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
        // then calls FlushStateToDisk()), resulting in a call to this function before we
        // have populated `m_blockfile_info` via LoadBlockIndexDB().
        return;
    }
    assert(static_cast<int>(m_blockfile_info.size()) > m_last_blockfile);

    FlatFilePos block_pos_old(m_last_blockfile, m_blockfile_info[m_last_blockfile].nSize);
    if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
        m_opts.notifications.flushError("Flushing block file to disk failed. This is likely the result of an I/O error.");
    }
    // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
    // e.g. during IBD or a sync after a node going offline
    if (!fFinalize || finalize_undo) FlushUndoFile(m_last_blockfile, finalize_undo);
}
683 :
684 0 : uint64_t BlockManager::CalculateCurrentUsage()
685 : {
686 0 : LOCK(cs_LastBlockFile);
687 :
688 0 : uint64_t retval = 0;
689 0 : for (const CBlockFileInfo& file : m_blockfile_info) {
690 0 : retval += file.nSize + file.nUndoSize;
691 : }
692 0 : return retval;
693 0 : }
694 :
695 0 : void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
696 : {
697 0 : std::error_code ec;
698 0 : for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
699 0 : FlatFilePos pos(*it, 0);
700 0 : const bool removed_blockfile{fs::remove(BlockFileSeq().FileName(pos), ec)};
701 0 : const bool removed_undofile{fs::remove(UndoFileSeq().FileName(pos), ec)};
702 0 : if (removed_blockfile || removed_undofile) {
703 0 : LogPrint(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
704 0 : }
705 0 : }
706 0 : }
707 :
708 403 : FlatFileSeq BlockManager::BlockFileSeq() const
709 : {
710 403 : return FlatFileSeq(m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
711 0 : }
712 :
713 400 : FlatFileSeq BlockManager::UndoFileSeq() const
714 : {
715 400 : return FlatFileSeq(m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE);
716 0 : }
717 :
718 202 : FILE* BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
719 : {
720 202 : return BlockFileSeq().Open(pos, fReadOnly);
721 0 : }
722 :
723 : /** Open an undo file (rev?????.dat) */
724 200 : FILE* BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
725 : {
726 200 : return UndoFileSeq().Open(pos, fReadOnly);
727 0 : }
728 :
729 0 : fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
730 : {
731 0 : return BlockFileSeq().FileName(pos);
732 0 : }
733 :
// Choose (and pre-allocate) the on-disk position for a new block of
// nAddSize bytes. When fKnown (reindex), the position in `pos` is reused
// as-is; otherwise the current block file is used, rolling over to a new
// file when the block wouldn't fit (flushing — and possibly finalizing the
// undo file of — the file being left). Updates per-file metadata and marks
// it dirty. Returns false only on a fatal out-of-disk-space condition.
bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile;
    if (m_blockfile_info.size() <= nFile) {
        m_blockfile_info.resize(nFile + 1);
    }

    bool finalize_undo = false;
    if (!fKnown) {
        unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
        // Use smaller blockfiles in test-only -fastprune mode - but avoid
        // the possibility of having a block not fit into the block file.
        if (m_opts.fast_prune) {
            max_blockfile_size = 0x10000; // 64kiB
            if (nAddSize >= max_blockfile_size) {
                // dynamically adjust the blockfile size to be larger than the added size
                max_blockfile_size = nAddSize + 1;
            }
        }
        assert(nAddSize < max_blockfile_size);
        // Roll over to the next file while the block doesn't fit.
        while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
            // when the undo file is keeping up with the block file, we want to flush it explicitly
            // when it is lagging behind (more blocks arrive than are being connected), we let the
            // undo block write case handle it
            finalize_undo = (m_blockfile_info[nFile].nHeightLast == m_undo_height_in_last_blockfile);
            nFile++;
            if (m_blockfile_info.size() <= nFile) {
                m_blockfile_info.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = m_blockfile_info[nFile].nSize;
    }

    if ((int)nFile != m_last_blockfile) {
        if (!fKnown) {
            LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString());
        }
        // Finalize the file we're leaving (truncate to used size) unless reindexing.
        FlushBlockFile(!fKnown, finalize_undo);
        m_last_blockfile = nFile;
        m_undo_height_in_last_blockfile = 0; // No undo data yet in the new file, so reset our undo-height tracking.
    }

    m_blockfile_info[nFile].AddBlock(nHeight, nTime);
    if (fKnown) {
        // Reindex: the data is already there; just extend the recorded size.
        m_blockfile_info[nFile].nSize = std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize);
    } else {
        m_blockfile_info[nFile].nSize += nAddSize;
    }

    if (!fKnown) {
        bool out_of_space;
        size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
        if (out_of_space) {
            m_opts.notifications.fatalError("Disk space is too low!", _("Disk space is too low!"));
            return false;
        }
        // Growing the file may push us over the prune target; schedule a check.
        if (bytes_allocated != 0 && IsPruneMode()) {
            m_check_for_pruning = true;
        }
    }

    m_dirty_fileinfo.insert(nFile);
    return true;
}
801 :
// Reserve space for nAddSize bytes of undo data in the rev file paired with
// block file nFile, returning the write position in `pos` and marking the
// file's metadata dirty. Returns false (fatal) when the disk is out of space.
bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
{
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    // Undo data is simply appended at the current end of the rev file.
    pos.nPos = m_blockfile_info[nFile].nUndoSize;
    m_blockfile_info[nFile].nUndoSize += nAddSize;
    m_dirty_fileinfo.insert(nFile);

    bool out_of_space;
    size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
    if (out_of_space) {
        return FatalError(m_opts.notifications, state, "Disk space is too low!", _("Disk space is too low!"));
    }
    // Growing the file may push us over the prune target; schedule a check.
    if (bytes_allocated != 0 && IsPruneMode()) {
        m_check_for_pruning = true;
    }

    return true;
}
823 :
// Serialize `block` to its blk file at `pos`, preceded by the network magic
// and payload-size header. On success, pos.nPos is updated to the start of
// the block payload (after the header), which is what the block index stores.
bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
{
    // Open history file to append
    CAutoFile fileout{OpenBlockFile(pos), CLIENT_VERSION};
    if (fileout.IsNull()) {
        return error("WriteBlockToDisk: OpenBlockFile failed");
    }

    // Write index header
    unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
    fileout << GetParams().MessageStart() << nSize;

    // Write block
    // ftell AFTER the header so pos.nPos addresses the block data itself.
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0) {
        return error("WriteBlockToDisk: ftell failed");
    }
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}
846 :
//! Persist the undo data (spent coins) for `block` to its rev file, if not
//! already written. On success the block index entry is updated in memory
//! (nUndoPos, BLOCK_HAVE_UNDO) and marked dirty for a later flush to the
//! block tree DB. Requires cs_main.
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
{
    AssertLockHeld(::cs_main);
    // Write undo information to disk
    if (block.GetUndoPos().IsNull()) {
        FlatFilePos _pos;
        // +40 extra bytes reserved beyond the serialized undo data —
        // presumably the 8-byte disk header plus a 32-byte checksum written
        // by UndoWriteToDisk; confirm against that helper.
        if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
            return error("ConnectBlock(): FindUndoPos failed");
        }
        if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) {
            return FatalError(m_opts.notifications, state, "Failed to write undo data");
        }
        // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
        // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
        // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
        // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
        // the FindBlockPos function
        if (_pos.nFile < m_last_blockfile && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
            FlushUndoFile(_pos.nFile, true);
        } else if (_pos.nFile == m_last_blockfile && static_cast<uint32_t>(block.nHeight) > m_undo_height_in_last_blockfile) {
            // Track the highest undo height written to the current file, so the
            // branch above can decide when that file is complete.
            m_undo_height_in_last_blockfile = block.nHeight;
        }
        // update nUndoPos in block index
        block.nUndoPos = _pos.nPos;
        block.nStatus |= BLOCK_HAVE_UNDO;
        m_dirty_blockindex.insert(&block);
    }

    return true;
}
877 :
//! Read and deserialize a block from the given on-disk position into `block`.
//! After reading, cheap integrity checks are applied: proof-of-work on the
//! header and, on signet only, the block signature solution. Returns false
//! (logging the reason) on any failure; `block` may then be partially
//! populated.
bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) const
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein{OpenBlockFile(pos, true), CLIENT_VERSION};
    if (filein.IsNull()) {
        return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
    }

    // Read block
    try {
        filein >> block;
    } catch (const std::exception& e) {
        // Deserialization failures and low-level I/O errors both surface here.
        return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
    }

    // Check the header
    if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
        return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
    }

    // Signet only: check block solution
    if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
        return error("ReadBlockFromDisk: Errors in block solution at %s", pos.ToString());
    }

    return true;
}
907 :
908 1 : bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) const
909 : {
910 2 : const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
911 :
912 1 : if (!ReadBlockFromDisk(block, block_pos)) {
913 0 : return false;
914 : }
915 1 : if (block.GetHash() != index.GetBlockHash()) {
916 0 : return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
917 0 : index.ToString(), block_pos.ToString());
918 : }
919 1 : return true;
920 1 : }
921 :
922 0 : bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos) const
923 : {
924 0 : FlatFilePos hpos = pos;
925 0 : hpos.nPos -= 8; // Seek back 8 bytes for meta header
926 0 : AutoFile filein{OpenBlockFile(hpos, true)};
927 0 : if (filein.IsNull()) {
928 0 : return error("%s: OpenBlockFile failed for %s", __func__, pos.ToString());
929 : }
930 :
931 : try {
932 : MessageStartChars blk_start;
933 : unsigned int blk_size;
934 :
935 0 : filein >> blk_start >> blk_size;
936 :
937 0 : if (blk_start != GetParams().MessageStart()) {
938 0 : return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
939 0 : HexStr(blk_start),
940 0 : HexStr(GetParams().MessageStart()));
941 : }
942 :
943 0 : if (blk_size > MAX_SIZE) {
944 0 : return error("%s: Block data is larger than maximum deserialization size for %s: %s versus %s", __func__, pos.ToString(),
945 : blk_size, MAX_SIZE);
946 : }
947 :
948 0 : block.resize(blk_size); // Zeroing of memory is intentional here
949 0 : filein.read(MakeWritableByteSpan(block));
950 0 : } catch (const std::exception& e) {
951 0 : return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
952 0 : }
953 :
954 0 : return true;
955 0 : }
956 :
//! Find a position in a blk file for `block` (allocating disk space as
//! needed) and, for new blocks, serialize it there. A non-null `dbp` means
//! the block already exists on disk at that position (reindex), so only the
//! file bookkeeping is updated and nothing is rewritten.
//! Returns the block's position on disk, or a null FlatFilePos on failure.
FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, const FlatFilePos* dbp)
{
    unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
    FlatFilePos blockPos;
    const auto position_known {dbp != nullptr};
    if (position_known) {
        blockPos = *dbp;
    } else {
        // when known, blockPos.nPos points at the offset of the block data in the blk file. that already accounts for
        // the serialization header present in the file (the 4 magic message start bytes + the 4 length bytes = 8 bytes = BLOCK_SERIALIZATION_HEADER_SIZE).
        // we add BLOCK_SERIALIZATION_HEADER_SIZE only for new blocks since they will have the serialization header added when written to disk.
        nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
    }
    if (!FindBlockPos(blockPos, nBlockSize, nHeight, block.GetBlockTime(), position_known)) {
        // FindBlockPos reports fatal conditions itself; signal failure to the
        // caller with a null position.
        error("%s: FindBlockPos failed", __func__);
        return FlatFilePos();
    }
    if (!position_known) {
        if (!WriteBlockToDisk(block, blockPos)) {
            m_opts.notifications.fatalError("Failed to write block");
            return FlatFilePos();
        }
    }
    return blockPos;
}
982 :
983 : class ImportingNow
984 : {
985 : std::atomic<bool>& m_importing;
986 :
987 : public:
988 0 : ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
989 : {
990 0 : assert(m_importing == false);
991 0 : m_importing = true;
992 0 : }
993 0 : ~ImportingNow()
994 : {
995 0 : assert(m_importing == true);
996 0 : m_importing = false;
997 0 : }
998 : };
999 :
//! Bring block storage and the active chain up to date at startup:
//!  1. if -reindex was requested, rebuild the block index by scanning every
//!     blk?????.dat file on disk in sequence,
//!  2. import any user-supplied block files (-loadblock), and
//!  3. activate the best chain for every chainstate.
//! Returns early (skipping the remaining steps) whenever an interrupt is
//! requested.
void ImportBlocks(ChainstateManager& chainman, std::vector<fs::path> vImportFiles)
{
    // Best-effort scheduling hint for this bulk, I/O-heavy work.
    ScheduleBatchPriority();

    {
        // The importing flag is held for this whole scope and cleared by the
        // ImportingNow destructor, even on the early-return paths below.
        ImportingNow imp{chainman.m_blockman.m_importing};

        // -reindex
        if (fReindex) {
            int nFile = 0;
            // Map of disk positions for blocks with unknown parent (only used for reindex);
            // parent hash -> child disk position, multiple children can have the same parent.
            std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
            while (true) {
                FlatFilePos pos(nFile, 0);
                // Block files are numbered consecutively, so the first missing
                // file ends the scan.
                if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
                    break; // No block files left to reindex
                }
                FILE* file = chainman.m_blockman.OpenBlockFile(pos, true);
                if (!file) {
                    break; // This error is logged in OpenBlockFile
                }
                LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
                chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
                if (chainman.m_interrupt) {
                    LogPrintf("Interrupt requested. Exit %s\n", __func__);
                    return;
                }
                nFile++;
            }
            // Reindex completed: persist that fact so the next startup does not
            // redo it, and clear the in-memory flag.
            WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
            fReindex = false;
            LogPrintf("Reindexing finished\n");
            // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
            chainman.ActiveChainstate().LoadGenesisBlock();
        }

        // -loadblock=
        for (const fs::path& path : vImportFiles) {
            FILE* file = fsbridge::fopen(path, "rb");
            if (file) {
                LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
                chainman.LoadExternalBlockFile(file);
                if (chainman.m_interrupt) {
                    LogPrintf("Interrupt requested. Exit %s\n", __func__);
                    return;
                }
            } else {
                // An unreadable import file is not fatal; continue with the rest.
                LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
            }
        }

        // scan for better chains in the block chain database, that are not yet connected in the active best chain

        // We can't hold cs_main during ActivateBestChain even though we're accessing
        // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
        // the relevant pointers before the ABC call.
        for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
            BlockValidationState state;
            if (!chainstate->ActivateBestChain(state, nullptr)) {
                chainman.GetNotifications().fatalError(strprintf("Failed to connect best block (%s)", state.ToString()));
                return;
            }
        }
    } // End scope of ImportingNow
}
1066 : } // namespace node
|