/bitcoin/src/index/blockfilterindex.cpp
Line | Count | Source |
1 | | // Copyright (c) 2018-2022 The Bitcoin Core developers |
2 | | // Distributed under the MIT software license, see the accompanying |
3 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
4 | | |
5 | | #include <map> |
6 | | |
7 | | #include <clientversion.h> |
8 | | #include <common/args.h> |
9 | | #include <dbwrapper.h> |
10 | | #include <hash.h> |
11 | | #include <index/blockfilterindex.h> |
12 | | #include <logging.h> |
13 | | #include <node/blockstorage.h> |
14 | | #include <undo.h> |
15 | | #include <util/fs_helpers.h> |
16 | | #include <validation.h> |
17 | | |
18 | | /* The index database stores three items for each block: the disk location of the encoded filter, |
19 | | * its dSHA256 hash, and the header. Those belonging to blocks on the active chain are indexed by |
20 | | * height, and those belonging to blocks that have been reorganized out of the active chain are |
21 | | * indexed by block hash. This ensures that filter data for any block that becomes part of the |
22 | | * active chain can always be retrieved, alleviating timing concerns. |
23 | | * |
24 | | * The filters themselves are stored in flat files and referenced by the LevelDB entries. This |
25 | | * minimizes the amount of data written to LevelDB and keeps the database values constant size. The |
26 | | * disk location of the next block filter to be written (represented as a FlatFilePos) is stored |
27 | | * under the DB_FILTER_POS key. |
28 | | * |
29 | | * Keys for the height index have the type [DB_BLOCK_HEIGHT, uint32 (BE)]. The height is represented |
30 | | * as big-endian so that sequential reads of filters by height are fast. |
31 | | * Keys for the hash index have the type [DB_BLOCK_HASH, uint256]. |
32 | | */ |
33 | | constexpr uint8_t DB_BLOCK_HASH{'s'}; |
34 | | constexpr uint8_t DB_BLOCK_HEIGHT{'t'}; |
35 | | constexpr uint8_t DB_FILTER_POS{'P'}; |
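// Editor's sketch (not part of the source): how the keys above lay out on
// disk. Heights serialize big-endian, so LevelDB's lexicographic key order
// matches numeric height order and sequential reads by height stay fast:
//
//   DBHeightKey{500000}   ->  0x74 0x00 0x07 0xA1 0x20
//                             't'  \-- 500000 as uint32 (BE) --/
//   DBHashKey{block_hash} ->  0x73 ('s') followed by the 32-byte uint256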
36 | | |
37 | | constexpr unsigned int MAX_FLTR_FILE_SIZE = 0x1000000; // 16 MiB |
38 | | /** The pre-allocation chunk size for fltr?????.dat files */ |
39 | | constexpr unsigned int FLTR_FILE_CHUNK_SIZE = 0x100000; // 1 MiB |
40 | | /** Maximum size of the cfheaders cache. |
41 | | * The limit prevents a bug in filling this cache |
42 | | * from potentially turning into an OOM. At 2000 entries, this cache |
43 | | * is big enough for a 2,000,000-block chain, which |
44 | | * should be enough until ~2047. */ |
45 | | constexpr size_t CF_HEADERS_CACHE_MAX_SZ{2000}; |
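// Editor's sketch of the arithmetic behind this bound: headers are cached only
// at checkpoint heights (every CFCHECKPT_INTERVAL = 1000 blocks, defined in
// blockfilterindex.h), so 2000 entries cover 2000 * 1000 = 2,000,000 blocks.
// At ~144 blocks per day (one per ten minutes), that is about
// 2,000,000 / (144 * 365) ~= 38 years of chain from the 2009 genesis, which is
// where the ~2047 estimate comes from.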
46 | | |
47 | | namespace { |
48 | | |
49 | | struct DBVal { |
50 | | uint256 hash; |
51 | | uint256 header; |
52 | | FlatFilePos pos; |
53 | | |
54 | 2.25M | SERIALIZE_METHODS(DBVal, obj) { READWRITE(obj.hash, obj.header, obj.pos); } |
Instantiation (anonymous namespace)::DBVal::SerializationOps<DataStream, DBVal, ActionUnserialize>: 9.13k
Instantiation (anonymous namespace)::DBVal::SerializationOps<DataStream, DBVal const, ActionSerialize>: 2.24M
55 | | }; |
56 | | |
57 | | struct DBHeightKey { |
58 | | int height; |
59 | | |
60 | 2.24M | explicit DBHeightKey(int height_in) : height(height_in) {} |
61 | | |
62 | | template<typename Stream> |
63 | | void Serialize(Stream& s) const |
64 | 2.24M | { |
65 | 2.24M | ser_writedata8(s, DB_BLOCK_HEIGHT); |
66 | 2.24M | ser_writedata32be(s, height); |
67 | 2.24M | } |
68 | | |
69 | | template<typename Stream> |
70 | | void Unserialize(Stream& s) |
71 | 6.51k | { |
72 | 6.51k | const uint8_t prefix{ser_readdata8(s)}; |
73 | 6.51k | if (prefix != DB_BLOCK_HEIGHT) { Branch (73:13): [True: 0, False: 6.51k]
74 | 0 | throw std::ios_base::failure("Invalid format for block filter index DB height key"); |
75 | 0 | } |
76 | 6.51k | height = ser_readdata32be(s); |
77 | 6.51k | } |
78 | | }; |
79 | | |
80 | | struct DBHashKey { |
81 | | uint256 hash; |
82 | | |
83 | 6.51k | explicit DBHashKey(const uint256& hash_in) : hash(hash_in) {} |
84 | | |
85 | 6.51k | SERIALIZE_METHODS(DBHashKey, obj) { |
86 | 6.51k | uint8_t prefix{DB_BLOCK_HASH}; |
87 | 6.51k | READWRITE(prefix); |
88 | 6.51k | if (prefix != DB_BLOCK_HASH) { Branch (88:13): [True: 0, False: 6.51k]
89 | 0 | throw std::ios_base::failure("Invalid format for block filter index DB hash key"); |
90 | 0 | } |
91 | | |
92 | 6.51k | READWRITE(obj.hash); |
93 | 6.51k | } |
94 | | }; |
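// Editor's note on DBHashKey: READWRITE(prefix) is direction-agnostic. When
// serializing it writes the DB_BLOCK_HASH byte; when unserializing it
// overwrites `prefix` with the byte read from the stream, so the check above
// can only fail on a corrupted or mismatched key during reads.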
95 | | |
96 | | } // namespace |
97 | | |
98 | | static std::map<BlockFilterType, BlockFilterIndex> g_filter_indexes; |
99 | | |
100 | | BlockFilterIndex::BlockFilterIndex(std::unique_ptr<interfaces::Chain> chain, BlockFilterType filter_type, |
101 | | size_t n_cache_size, bool f_memory, bool f_wipe) |
102 | 11.0k | : BaseIndex(std::move(chain), BlockFilterTypeName(filter_type) + " block filter index") |
103 | 11.0k | , m_filter_type(filter_type) |
104 | 11.0k | { |
105 | 11.0k | const std::string& filter_name = BlockFilterTypeName(filter_type); |
106 | 11.0k | if (filter_name.empty()) throw std::invalid_argument("unknown filter_type"); Branch (106:9): [True: 0, False: 11.0k]
107 | | |
108 | 11.0k | fs::path path = gArgs.GetDataDirNet() / "indexes" / "blockfilter" / fs::u8path(filter_name); |
109 | 11.0k | fs::create_directories(path); |
110 | | |
111 | 11.0k | m_db = std::make_unique<BaseIndex::DB>(path / "db", n_cache_size, f_memory, f_wipe); |
112 | 11.0k | m_filter_fileseq = std::make_unique<FlatFileSeq>(std::move(path), "fltr", FLTR_FILE_CHUNK_SIZE); |
113 | 11.0k | } |
114 | | |
115 | | bool BlockFilterIndex::CustomInit(const std::optional<interfaces::BlockRef>& block) |
116 | 11.0k | { |
117 | 11.0k | if (!m_db->Read(DB_FILTER_POS, m_next_filter_pos)) { Branch (117:9): [True: 11.0k, False: 0]
118 | | // Check that the cause of the read failure is that the key does not exist. Any other errors |
119 | | // indicate database corruption or a disk failure, and starting the index would cause |
120 | | // further corruption. |
121 | 11.0k | if (m_db->Exists(DB_FILTER_POS)) { Branch (121:13): [True: 0, False: 11.0k]
122 | 0 | LogError("%s: Cannot read current %s state; index may be corrupted\n", |
123 | 0 | __func__, GetName()); |
124 | 0 | return false; |
125 | 0 | } |
126 | | |
127 | | // If the DB_FILTER_POS is not set, then initialize to the first location. |
128 | 11.0k | m_next_filter_pos.nFile = 0; |
129 | 11.0k | m_next_filter_pos.nPos = 0; |
130 | 11.0k | } |
131 | | |
132 | 11.0k | if (block) { Branch (132:9): [True: 0, False: 11.0k]
133 | 0 | auto op_last_header = ReadFilterHeader(block->height, block->hash); |
134 | 0 | if (!op_last_header) { Branch (134:13): [True: 0, False: 0]
135 | 0 | LogError("Cannot read last block filter header; index may be corrupted\n"); |
136 | 0 | return false; |
137 | 0 | } |
138 | 0 | m_last_header = *op_last_header; |
139 | 0 | } |
140 | | |
141 | 11.0k | return true; |
142 | 11.0k | } |
143 | | |
144 | | bool BlockFilterIndex::CustomCommit(CDBBatch& batch) |
145 | 20.6k | { |
146 | 20.6k | const FlatFilePos& pos = m_next_filter_pos; |
147 | | |
148 | | // Flush current filter file to disk. |
149 | 20.6k | AutoFile file{m_filter_fileseq->Open(pos)}; |
150 | 20.6k | if (file.IsNull()) { Branch (150:9): [True: 0, False: 20.6k]
151 | 0 | LogError("%s: Failed to open filter file %d\n", __func__, pos.nFile); |
152 | 0 | return false; |
153 | 0 | } |
154 | 20.6k | if (!file.Commit()) { Branch (154:9): [True: 0, False: 20.6k]
155 | 0 | LogError("%s: Failed to commit filter file %d\n", __func__, pos.nFile); |
156 | 0 | return false; |
157 | 0 | } |
158 | | |
159 | 20.6k | batch.Write(DB_FILTER_POS, pos); |
160 | 20.6k | return true; |
161 | 20.6k | } |
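// Editor's note on CustomCommit: the ordering is deliberate. The flat filter
// file is flushed and synced (file.Commit()) before DB_FILTER_POS is added to
// the batch, so the database never advances the write position past filter
// data that has not reached disk.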
162 | | |
163 | | bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, const uint256& hash, BlockFilter& filter) const |
164 | 0 | { |
165 | 0 | AutoFile filein{m_filter_fileseq->Open(pos, true)}; |
166 | 0 | if (filein.IsNull()) { Branch (166:9): [True: 0, False: 0]
167 | 0 | return false; |
168 | 0 | } |
169 | | |
170 | | // Check that the hash of the encoded_filter matches the one stored in the db. |
171 | 0 | uint256 block_hash; |
172 | 0 | std::vector<uint8_t> encoded_filter; |
173 | 0 | try { |
174 | 0 | filein >> block_hash >> encoded_filter; |
175 | 0 | if (Hash(encoded_filter) != hash) { Branch (175:13): [True: 0, False: 0]
176 | 0 | LogError("Checksum mismatch in filter decode.\n"); |
177 | 0 | return false; |
178 | 0 | } |
179 | 0 | filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter), /*skip_decode_check=*/true); |
180 | 0 | } |
181 | 0 | catch (const std::exception& e) { |
182 | 0 | LogError("%s: Failed to deserialize block filter from disk: %s\n", __func__, e.what()); |
183 | 0 | return false; |
184 | 0 | } |
185 | | |
186 | 0 | return true; |
187 | 0 | } |
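// Editor's sketch of the flat-file record parsed above (it mirrors the write
// in WriteFilterToDisk below):
//
//   [32-byte block hash][CompactSize length][encoded filter bytes]
//
// The dSHA256 compared against the DB entry covers only the encoded filter
// bytes, not the block hash or the length prefix.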
188 | | |
189 | | size_t BlockFilterIndex::WriteFilterToDisk(FlatFilePos& pos, const BlockFilter& filter) |
190 | 2.23M | { |
191 | 2.23M | assert(filter.GetFilterType() == GetFilterType()); Branch (191:5): [True: 2.23M, False: 0]
192 | | |
193 | 2.23M | size_t data_size = |
194 | 2.23M | GetSerializeSize(filter.GetBlockHash()) + |
195 | 2.23M | GetSerializeSize(filter.GetEncodedFilter()); |
196 | | |
197 | | // If writing the filter would overflow the file, flush and move to the next one. |
198 | 2.23M | if (pos.nPos + data_size > MAX_FLTR_FILE_SIZE) { Branch (198:9): [True: 0, False: 2.23M]
199 | 0 | AutoFile last_file{m_filter_fileseq->Open(pos)}; |
200 | 0 | if (last_file.IsNull()) { Branch (200:13): [True: 0, False: 0]
201 | 0 | LogPrintf("%s: Failed to open filter file %d\n", __func__, pos.nFile); |
202 | 0 | return 0; |
203 | 0 | } |
204 | 0 | if (!last_file.Truncate(pos.nPos)) { Branch (204:13): [True: 0, False: 0]
205 | 0 | LogPrintf("%s: Failed to truncate filter file %d\n", __func__, pos.nFile); |
206 | 0 | return 0; |
207 | 0 | } |
208 | 0 | if (!last_file.Commit()) { Branch (208:13): [True: 0, False: 0]
209 | 0 | LogPrintf("%s: Failed to commit filter file %d\n", __func__, pos.nFile); |
210 | 0 | return 0; |
211 | 0 | } |
212 | | |
213 | 0 | pos.nFile++; |
214 | 0 | pos.nPos = 0; |
215 | 0 | } |
216 | | |
217 | | // Pre-allocate sufficient space for filter data. |
218 | 2.23M | bool out_of_space; |
219 | 2.23M | m_filter_fileseq->Allocate(pos, data_size, out_of_space); |
220 | 2.23M | if (out_of_space) { Branch (220:9): [True: 0, False: 2.23M]
221 | 0 | LogPrintf("%s: out of disk space\n", __func__); |
222 | 0 | return 0; |
223 | 0 | } |
224 | | |
225 | 2.23M | AutoFile fileout{m_filter_fileseq->Open(pos)}; |
226 | 2.23M | if (fileout.IsNull()) { Branch (226:9): [True: 0, False: 2.23M]
227 | 0 | LogPrintf("%s: Failed to open filter file %d\n", __func__, pos.nFile); |
228 | 0 | return 0; |
229 | 0 | } |
230 | | |
231 | 2.23M | fileout << filter.GetBlockHash() << filter.GetEncodedFilter(); |
232 | 2.23M | return data_size; |
233 | 2.23M | } |
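// Editor's sketch of the size computation above: the block hash always
// serializes to 32 bytes, and the encoded filter serializes as a CompactSize
// length prefix plus the raw bytes. For a hypothetical 300-byte encoded
// filter:
//
//   data_size = 32 + (3 + 300) = 335   // 300 needs the 3-byte 0xfd CompactSize form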
234 | | |
235 | | std::optional<uint256> BlockFilterIndex::ReadFilterHeader(int height, const uint256& expected_block_hash) |
236 | 2.62k | { |
237 | 2.62k | std::pair<uint256, DBVal> read_out; |
238 | 2.62k | if (!m_db->Read(DBHeightKey(height), read_out)) { Branch (238:9): [True: 0, False: 2.62k]
239 | 0 | return std::nullopt; |
240 | 0 | } |
241 | | |
242 | 2.62k | if (read_out.first != expected_block_hash) { Branch (242:9): [True: 0, False: 2.62k]
243 | 0 | LogError("%s: previous block header belongs to unexpected block %s; expected %s\n", |
244 | 0 | __func__, read_out.first.ToString(), expected_block_hash.ToString()); |
245 | 0 | return std::nullopt; |
246 | 0 | } |
247 | | |
248 | 2.62k | return read_out.second.header; |
249 | 2.62k | } |
250 | | |
251 | | bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block) |
252 | 2.23M | { |
253 | 2.23M | CBlockUndo block_undo; |
254 | | |
255 | 2.23M | if (block.height > 0) { Branch (255:9): [True: 2.22M, False: 11.0k]
256 | | // The pindex variable gives the indexing code access to node internals. It |
257 | | // will be removed in an upcoming commit. |
258 | 2.22M | const CBlockIndex* pindex = WITH_LOCK(cs_main, return m_chainstate->m_blockman.LookupBlockIndex(block.hash)); |
259 | 2.22M | if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { Branch (259:13): [True: 0, False: 2.22M]
260 | 0 | return false; |
261 | 0 | } |
262 | 2.22M | } |
263 | | |
264 | 2.23M | BlockFilter filter(m_filter_type, *Assert(block.data), block_undo); |
265 | | |
266 | 2.23M | const uint256& header = filter.ComputeHeader(m_last_header); |
267 | 2.23M | bool res = Write(filter, block.height, header); |
268 | 2.23M | if (res) m_last_header = header; // update last header Branch (268:9): [True: 2.23M, False: 0]
269 | 2.23M | return res; |
270 | 2.23M | } |
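// Editor's note on CustomAppend: per BIP157, ComputeHeader chains the filter
// headers as
//
//   header_n = dSHA256(filter_hash_n || header_{n-1})
//
// with the previous header of the genesis block defined as 32 zero bytes.
// m_last_header only advances once the write succeeds, keeping the chain
// consistent with what is on disk.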
271 | | |
272 | | bool BlockFilterIndex::Write(const BlockFilter& filter, uint32_t block_height, const uint256& filter_header) |
273 | 2.23M | { |
274 | 2.23M | size_t bytes_written = WriteFilterToDisk(m_next_filter_pos, filter); |
275 | 2.23M | if (bytes_written == 0) return false; Branch (275:9): [True: 0, False: 2.23M]
276 | | |
277 | 2.23M | std::pair<uint256, DBVal> value; |
278 | 2.23M | value.first = filter.GetBlockHash(); |
279 | 2.23M | value.second.hash = filter.GetHash(); |
280 | 2.23M | value.second.header = filter_header; |
281 | 2.23M | value.second.pos = m_next_filter_pos; |
282 | | |
283 | 2.23M | if (!m_db->Write(DBHeightKey(block_height), value)) { Branch (283:9): [True: 0, False: 2.23M]
284 | 0 | return false; |
285 | 0 | } |
286 | | |
287 | 2.23M | m_next_filter_pos.nPos += bytes_written; |
288 | 2.23M | return true; |
289 | 2.23M | } |
290 | | |
291 | | [[nodiscard]] static bool CopyHeightIndexToHashIndex(CDBIterator& db_it, CDBBatch& batch, |
292 | | const std::string& index_name, |
293 | | int start_height, int stop_height) |
294 | 2.62k | { |
295 | 2.62k | DBHeightKey key(start_height); |
296 | 2.62k | db_it.Seek(key); |
297 | | |
298 | 9.13k | for (int height = start_height; height <= stop_height; ++height) { Branch (298:37): [True: 6.51k, False: 2.62k]
299 | 6.51k | if (!db_it.GetKey(key) || key.height != height) { Branch (299:13): [True: 0, False: 6.51k]
Branch (299:35): [True: 0, False: 6.51k]
300 | 0 | LogError("%s: unexpected key in %s: expected (%c, %d)\n", |
301 | 0 | __func__, index_name, DB_BLOCK_HEIGHT, height); |
302 | 0 | return false; |
303 | 0 | } |
304 | | |
305 | 6.51k | std::pair<uint256, DBVal> value; |
306 | 6.51k | if (!db_it.GetValue(value)) { Branch (306:13): [True: 0, False: 6.51k]
307 | 0 | LogError("%s: unable to read value in %s at key (%c, %d)\n", |
308 | 0 | __func__, index_name, DB_BLOCK_HEIGHT, height); |
309 | 0 | return false; |
310 | 0 | } |
311 | | |
312 | 6.51k | batch.Write(DBHashKey(value.first), std::move(value.second)); |
313 | | |
314 | 6.51k | db_it.Next(); |
315 | 6.51k | } |
316 | 2.62k | return true; |
317 | 2.62k | } |
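// Editor's sketch of CopyHeightIndexToHashIndex during a reorg: if the tip
// rewinds from height 102 to a fork point at height 100, the entries at
// (t,100), (t,101), (t,102) are re-written under (s, block_hash) before the
// (t, height) slots get overwritten by the new branch, so filters for the
// abandoned blocks stay retrievable by hash.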
318 | | |
319 | | bool BlockFilterIndex::CustomRewind(const interfaces::BlockRef& current_tip, const interfaces::BlockRef& new_tip) |
320 | 2.62k | { |
321 | 2.62k | CDBBatch batch(*m_db); |
322 | 2.62k | std::unique_ptr<CDBIterator> db_it(m_db->NewIterator()); |
323 | | |
324 | | // During a reorg, we need to copy all filters for blocks that are getting disconnected from the |
325 | | // height index to the hash index so we can still find them when the height index entries are |
326 | | // overwritten. |
327 | 2.62k | if (!CopyHeightIndexToHashIndex(*db_it, batch, m_name, new_tip.height, current_tip.height)) { Branch (327:9): [True: 0, False: 2.62k]
328 | 0 | return false; |
329 | 0 | } |
330 | | |
331 | | // The latest filter position gets written in Commit by the call to BaseIndex::Rewind. |
332 | | // But since this creates new references to the filter, the position should get updated here |
333 | | // atomically as well in case Commit fails. |
334 | 2.62k | batch.Write(DB_FILTER_POS, m_next_filter_pos); |
335 | 2.62k | if (!m_db->WriteBatch(batch)) return false; Branch (335:9): [True: 0, False: 2.62k]
336 | | |
337 | | // Update cached header |
338 | 2.62k | m_last_header = *Assert(ReadFilterHeader(new_tip.height, new_tip.hash)); |
339 | 2.62k | return true; |
340 | 2.62k | } |
341 | | |
342 | | static bool LookupOne(const CDBWrapper& db, const CBlockIndex* block_index, DBVal& result) |
343 | 0 | { |
344 | | // First check if the result is stored under the height index and the value there matches the |
345 | | // block hash. This should be the case if the block is on the active chain. |
346 | 0 | std::pair<uint256, DBVal> read_out; |
347 | 0 | if (!db.Read(DBHeightKey(block_index->nHeight), read_out)) { Branch (347:9): [True: 0, False: 0]
348 | 0 | return false; |
349 | 0 | } |
350 | 0 | if (read_out.first == block_index->GetBlockHash()) { Branch (350:9): [True: 0, False: 0]
351 | 0 | result = std::move(read_out.second); |
352 | 0 | return true; |
353 | 0 | } |
354 | | |
355 | | // If the value at the height index corresponds to a different block, the result will be stored in |
356 | | // the hash index. |
357 | 0 | return db.Read(DBHashKey(block_index->GetBlockHash()), result); |
358 | 0 | } |
359 | | |
360 | | static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start_height, |
361 | | const CBlockIndex* stop_index, std::vector<DBVal>& results) |
362 | 0 | { |
363 | 0 | if (start_height < 0) { Branch (363:9): [True: 0, False: 0]
364 | 0 | LogError("%s: start height (%d) is negative\n", __func__, start_height); |
365 | 0 | return false; |
366 | 0 | } |
367 | 0 | if (start_height > stop_index->nHeight) { Branch (367:9): [True: 0, False: 0]
368 | 0 | LogError("%s: start height (%d) is greater than stop height (%d)\n", |
369 | 0 | __func__, start_height, stop_index->nHeight); |
370 | 0 | return false; |
371 | 0 | } |
372 | | |
373 | 0 | size_t results_size = static_cast<size_t>(stop_index->nHeight - start_height + 1); |
374 | 0 | std::vector<std::pair<uint256, DBVal>> values(results_size); |
375 | |
376 | 0 | DBHeightKey key(start_height); |
377 | 0 | std::unique_ptr<CDBIterator> db_it(db.NewIterator()); |
378 | 0 | db_it->Seek(DBHeightKey(start_height)); |
379 | 0 | for (int height = start_height; height <= stop_index->nHeight; ++height) { Branch (379:37): [True: 0, False: 0]
|
380 | 0 | if (!db_it->Valid() || !db_it->GetKey(key) || key.height != height) { Branch (380:13): [True: 0, False: 0]
Branch (380:32): [True: 0, False: 0]
Branch (380:55): [True: 0, False: 0]
381 | 0 | return false; |
382 | 0 | } |
383 | | |
384 | 0 | size_t i = static_cast<size_t>(height - start_height); |
385 | 0 | if (!db_it->GetValue(values[i])) { Branch (385:13): [True: 0, False: 0]
386 | 0 | LogError("%s: unable to read value in %s at key (%c, %d)\n", |
387 | 0 | __func__, index_name, DB_BLOCK_HEIGHT, height); |
388 | 0 | return false; |
389 | 0 | } |
390 | | |
391 | 0 | db_it->Next(); |
392 | 0 | } |
393 | | |
394 | 0 | results.resize(results_size); |
395 | | |
396 | | // Iterate backwards through block indexes collecting results in order to access the block hash |
397 | | // of each entry in case we need to look it up in the hash index. |
398 | 0 | for (const CBlockIndex* block_index = stop_index; |
399 | 0 | block_index && block_index->nHeight >= start_height; Branch (399:10): [True: 0, False: 0]
Branch (399:25): [True: 0, False: 0]
400 | 0 | block_index = block_index->pprev) { |
401 | 0 | uint256 block_hash = block_index->GetBlockHash(); |
402 | |
403 | 0 | size_t i = static_cast<size_t>(block_index->nHeight - start_height); |
404 | 0 | if (block_hash == values[i].first) { Branch (404:13): [True: 0, False: 0]
405 | 0 | results[i] = std::move(values[i].second); |
406 | 0 | continue; |
407 | 0 | } |
408 | | |
409 | 0 | if (!db.Read(DBHashKey(block_hash), results[i])) { Branch (409:13): [True: 0, False: 0]
410 | 0 | LogError("%s: unable to read value in %s at key (%c, %s)\n", |
411 | 0 | __func__, index_name, DB_BLOCK_HASH, block_hash.ToString()); |
412 | 0 | return false; |
413 | 0 | } |
414 | 0 | } |
415 | | |
416 | 0 | return true; |
417 | 0 | } |
418 | | |
419 | | bool BlockFilterIndex::LookupFilter(const CBlockIndex* block_index, BlockFilter& filter_out) const |
420 | 0 | { |
421 | 0 | DBVal entry; |
422 | 0 | if (!LookupOne(*m_db, block_index, entry)) { Branch (422:9): [True: 0, False: 0]
423 | 0 | return false; |
424 | 0 | } |
425 | | |
426 | 0 | return ReadFilterFromDisk(entry.pos, entry.hash, filter_out); |
427 | 0 | } |
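// Editor's usage sketch (hypothetical caller; error handling elided):
//
//   BlockFilterIndex* index = GetBlockFilterIndex(BlockFilterType::BASIC);
//   BlockFilter filter;
//   if (index && index->LookupFilter(block_index, filter)) {
//       // filter.GetEncodedFilter() is the BIP158 basic filter for the block.
//   }
//
// LookupFilter falls back from the height index to the hash index via
// LookupOne, so the lookup also succeeds for reorged-out blocks.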
428 | | |
429 | | bool BlockFilterIndex::LookupFilterHeader(const CBlockIndex* block_index, uint256& header_out) |
430 | 0 | { |
431 | 0 | LOCK(m_cs_headers_cache); |
432 | |
433 | 0 | bool is_checkpoint{block_index->nHeight % CFCHECKPT_INTERVAL == 0}; |
434 | |
435 | 0 | if (is_checkpoint) { Branch (435:9): [True: 0, False: 0]
436 | | // Try to find the block in the headers cache if this is a checkpoint height. |
437 | 0 | auto header = m_headers_cache.find(block_index->GetBlockHash()); |
438 | 0 | if (header != m_headers_cache.end()) { Branch (438:13): [True: 0, False: 0]
439 | 0 | header_out = header->second; |
440 | 0 | return true; |
441 | 0 | } |
442 | 0 | } |
443 | | |
444 | 0 | DBVal entry; |
445 | 0 | if (!LookupOne(*m_db, block_index, entry)) { Branch (445:9): [True: 0, False: 0]
446 | 0 | return false; |
447 | 0 | } |
448 | | |
449 | 0 | if (is_checkpoint && Branch (449:9): [True: 0, False: 0]
450 | 0 | m_headers_cache.size() < CF_HEADERS_CACHE_MAX_SZ) { Branch (450:9): [True: 0, False: 0]
451 | | // Add to the headers cache if this is a checkpoint height. |
452 | 0 | m_headers_cache.emplace(block_index->GetBlockHash(), entry.header); |
453 | 0 | } |
454 | |
455 | 0 | header_out = entry.header; |
456 | 0 | return true; |
457 | 0 | } |
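// Editor's note on LookupFilterHeader: the cache holds headers at BIP157
// checkpoint heights and is keyed by block hash rather than height, so a
// reorg merely makes stale entries unreachable instead of returning wrong
// headers for the new chain.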
458 | | |
459 | | bool BlockFilterIndex::LookupFilterRange(int start_height, const CBlockIndex* stop_index, |
460 | | std::vector<BlockFilter>& filters_out) const |
461 | 0 | { |
462 | 0 | std::vector<DBVal> entries; |
463 | 0 | if (!LookupRange(*m_db, m_name, start_height, stop_index, entries)) { Branch (463:9): [True: 0, False: 0]
464 | 0 | return false; |
465 | 0 | } |
466 | | |
467 | 0 | filters_out.resize(entries.size()); |
468 | 0 | auto filter_pos_it = filters_out.begin(); |
469 | 0 | for (const auto& entry : entries) { Branch (469:28): [True: 0, False: 0]
470 | 0 | if (!ReadFilterFromDisk(entry.pos, entry.hash, *filter_pos_it)) { Branch (470:13): [True: 0, False: 0]
471 | 0 | return false; |
472 | 0 | } |
473 | 0 | ++filter_pos_it; |
474 | 0 | } |
475 | | |
476 | 0 | return true; |
477 | 0 | } |
478 | | |
479 | | bool BlockFilterIndex::LookupFilterHashRange(int start_height, const CBlockIndex* stop_index, |
480 | | std::vector<uint256>& hashes_out) const |
481 | | |
482 | 0 | { |
483 | 0 | std::vector<DBVal> entries; |
484 | 0 | if (!LookupRange(*m_db, m_name, start_height, stop_index, entries)) { Branch (484:9): [True: 0, False: 0]
485 | 0 | return false; |
486 | 0 | } |
487 | | |
488 | 0 | hashes_out.clear(); |
489 | 0 | hashes_out.reserve(entries.size()); |
490 | 0 | for (const auto& entry : entries) { Branch (490:28): [True: 0, False: 0]
491 | 0 | hashes_out.push_back(entry.hash); |
492 | 0 | } |
493 | 0 | return true; |
494 | 0 | } |
495 | | |
496 | | BlockFilterIndex* GetBlockFilterIndex(BlockFilterType filter_type) |
497 | 11.0k | { |
498 | 11.0k | auto it = g_filter_indexes.find(filter_type); |
499 | 11.0k | return it != g_filter_indexes.end() ? &it->second : nullptr; Branch (499:12): [True: 11.0k, False: 0]
500 | 11.0k | } |
501 | | |
502 | | void ForEachBlockFilterIndex(std::function<void (BlockFilterIndex&)> fn) |
503 | 0 | { |
504 | 0 | for (auto& entry : g_filter_indexes) fn(entry.second); Branch (504:22): [True: 0, False: 0]
505 | 0 | } |
506 | | |
507 | | bool InitBlockFilterIndex(std::function<std::unique_ptr<interfaces::Chain>()> make_chain, BlockFilterType filter_type, |
508 | | size_t n_cache_size, bool f_memory, bool f_wipe) |
509 | 11.0k | { |
510 | 11.0k | auto result = g_filter_indexes.emplace(std::piecewise_construct, |
511 | 11.0k | std::forward_as_tuple(filter_type), |
512 | 11.0k | std::forward_as_tuple(make_chain(), filter_type, |
513 | 11.0k | n_cache_size, f_memory, f_wipe)); |
514 | 11.0k | return result.second; |
515 | 11.0k | } |
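// Editor's note on InitBlockFilterIndex: std::map::emplace is a no-op when the
// key is already present, so result.second is false if an index for this
// filter_type already exists, which the caller can treat as failure.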
516 | | |
517 | | bool DestroyBlockFilterIndex(BlockFilterType filter_type) |
518 | 0 | { |
519 | 0 | return g_filter_indexes.erase(filter_type); |
520 | 0 | } |
521 | | |
522 | | void DestroyAllBlockFilterIndexes() |
523 | 11.0k | { |
524 | 11.0k | g_filter_indexes.clear(); |
525 | 11.0k | } |