/bitcoin/src/net_processing.cpp
Line | Count | Source |
1 | | // Copyright (c) 2009-2010 Satoshi Nakamoto |
2 | | // Copyright (c) 2009-present The Bitcoin Core developers |
3 | | // Distributed under the MIT software license, see the accompanying |
4 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
5 | | |
6 | | #include <net_processing.h> |
7 | | |
8 | | #include <addrman.h> |
9 | | #include <arith_uint256.h> |
10 | | #include <banman.h> |
11 | | #include <blockencodings.h> |
12 | | #include <blockfilter.h> |
13 | | #include <chain.h> |
14 | | #include <chainparams.h> |
15 | | #include <common/bloom.h> |
16 | | #include <consensus/amount.h> |
17 | | #include <consensus/params.h> |
18 | | #include <consensus/validation.h> |
19 | | #include <core_memusage.h> |
20 | | #include <crypto/siphash.h> |
21 | | #include <deploymentstatus.h> |
22 | | #include <flatfile.h> |
23 | | #include <headerssync.h> |
24 | | #include <index/blockfilterindex.h> |
25 | | #include <kernel/chain.h> |
26 | | #include <logging.h> |
27 | | #include <merkleblock.h> |
28 | | #include <net.h> |
29 | | #include <net_permissions.h> |
30 | | #include <netaddress.h> |
31 | | #include <netbase.h> |
32 | | #include <netmessagemaker.h> |
33 | | #include <node/blockstorage.h> |
34 | | #include <node/connection_types.h> |
35 | | #include <node/protocol_version.h> |
36 | | #include <node/timeoffsets.h> |
37 | | #include <node/txdownloadman.h> |
38 | | #include <node/txreconciliation.h> |
39 | | #include <node/warnings.h> |
40 | | #include <policy/feerate.h> |
41 | | #include <policy/fees.h> |
42 | | #include <policy/packages.h> |
43 | | #include <policy/policy.h> |
44 | | #include <primitives/block.h> |
45 | | #include <primitives/transaction.h> |
46 | | #include <protocol.h> |
47 | | #include <random.h> |
48 | | #include <scheduler.h> |
49 | | #include <script/script.h> |
50 | | #include <serialize.h> |
51 | | #include <span.h> |
52 | | #include <streams.h> |
53 | | #include <sync.h> |
54 | | #include <tinyformat.h> |
55 | | #include <txmempool.h> |
56 | | #include <txorphanage.h> |
57 | | #include <uint256.h> |
58 | | #include <util/check.h> |
59 | | #include <util/strencodings.h> |
60 | | #include <util/time.h> |
61 | | #include <util/trace.h> |
62 | | #include <validation.h> |
63 | | |
64 | | #include <algorithm> |
65 | | #include <array> |
66 | | #include <atomic> |
67 | | #include <compare> |
68 | | #include <cstddef> |
69 | | #include <deque> |
70 | | #include <exception> |
71 | | #include <functional> |
72 | | #include <future> |
73 | | #include <initializer_list> |
74 | | #include <iterator> |
75 | | #include <limits> |
76 | | #include <list> |
77 | | #include <map> |
78 | | #include <memory> |
79 | | #include <optional> |
80 | | #include <queue> |
81 | | #include <ranges> |
82 | | #include <ratio> |
83 | | #include <set> |
84 | | #include <span> |
85 | | #include <typeinfo> |
86 | | #include <utility> |
87 | | |
88 | | using namespace util::hex_literals; |
89 | | |
90 | | TRACEPOINT_SEMAPHORE(net, inbound_message); |
91 | | TRACEPOINT_SEMAPHORE(net, misbehaving_connection); |
92 | | |
93 | | /** Headers download timeout. |
94 | | * Timeout = base + per_header * (expected number of headers) */ |
95 | | static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min; |
96 | | static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms; |
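[Editor's sketch] Taken together, these two constants define the headers-sync deadline described in the comment. A minimal sketch of the arithmetic, assuming the expected header count is estimated from how far the best known header lags the clock (names here are illustrative, not the actual code):

    #include <algorithm>
    #include <chrono>
    #include <cstdint>
    using namespace std::chrono_literals;

    // Sketch: deadline = now + base + per_header * expected_headers, where the
    // expected header count is roughly (now - best_header_time) / 10min spacing.
    std::chrono::microseconds HeadersSyncDeadline(std::chrono::microseconds now,
                                                  std::chrono::seconds best_header_time,
                                                  std::chrono::seconds pow_target_spacing)
    {
        const auto behind{std::chrono::duration_cast<std::chrono::seconds>(now) - best_header_time};
        const int64_t expected_headers{std::max<int64_t>(0, behind / pow_target_spacing)};
        return now + 15min /*base*/ + expected_headers * 1ms /*per header*/;
    }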
97 | | /** How long to wait for a peer to respond to a getheaders request */ |
98 | | static constexpr auto HEADERS_RESPONSE_TIME{2min}; |
99 | | /** Protect at least this many outbound peers from disconnection due to a slow or |
100 | | * lagging headers chain. |
101 | | */ |
102 | | static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4; |
103 | | /** Timeout for (unprotected) outbound peers to sync to our chainwork */ |
104 | | static constexpr auto CHAIN_SYNC_TIMEOUT{20min}; |
105 | | /** How frequently to check for stale tips */ |
106 | | static constexpr auto STALE_CHECK_INTERVAL{10min}; |
107 | | /** How frequently to check for extra outbound peers and disconnect */ |
108 | | static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s}; |
109 | | /** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict */ |
110 | | static constexpr auto MINIMUM_CONNECT_TIME{30s}; |
111 | | /** SHA256("main address relay")[0:8] */ |
112 | | static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; |
113 | | /// Age after which a stale block will no longer be served if requested as |
114 | | /// protection against fingerprinting. Set to one month, denominated in seconds. |
115 | | static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60; |
116 | | /// Age after which a block is considered historical for purposes of rate |
117 | | /// limiting block relay. Set to one week, denominated in seconds. |
118 | | static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60; |
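[Editor's sketch] Both limits are plain cutoffs against a block's timestamp; a self-contained sketch of how such checks read (hypothetical helper names):

    #include <cstdint>

    // Sketch: "historical" blocks (older than a week) are rate limited when
    // served, and stale (non-main-chain) blocks older than a month are not
    // served at all, as protection against fingerprinting.
    bool IsHistoricalBlock(int64_t block_time, int64_t now) { return block_time < now - HISTORICAL_BLOCK_AGE; }
    bool TooOldToServeStale(int64_t block_time, int64_t now) { return block_time < now - STALE_RELAY_AGE_LIMIT; }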
119 | | /** Time between pings automatically sent out for latency probing and keepalive */ |
120 | | static constexpr auto PING_INTERVAL{2min}; |
121 | | /** The maximum number of entries in a locator */ |
122 | | static const unsigned int MAX_LOCATOR_SZ = 101; |
123 | | /** The maximum number of entries in an 'inv' protocol message */ |
124 | | static const unsigned int MAX_INV_SZ = 50000; |
125 | | /** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */ |
126 | | static const unsigned int MAX_GETDATA_SZ = 1000; |
127 | | /** Number of blocks that can be requested at any given time from a single peer. */ |
128 | | static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16; |
129 | | /** Default time during which a peer must stall block download progress before being disconnected. |
130 | | * The actual timeout is increased temporarily if peers are disconnected for hitting the timeout */ |
131 | | static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s}; |
132 | | /** Maximum timeout for stalling block download. */ |
133 | | static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s}; |
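[Editor's sketch] The default and the cap imply an adaptive backoff, as the comment above notes. A sketch of one way that adaptation can work, assuming a doubling on each stalling disconnect and a reset once download progresses (illustrative; the real member is m_block_stalling_timeout, declared further down in this file):

    #include <algorithm>
    #include <chrono>

    std::chrono::seconds stall_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};

    // Back off after disconnecting a stalling peer, capped at the maximum...
    void OnStallingDisconnect()
    {
        stall_timeout = std::min(stall_timeout * 2, BLOCK_STALLING_TIMEOUT_MAX);
    }
    // ...and return to the default once block download moves forward again.
    void OnBlockDownloadProgress() { stall_timeout = BLOCK_STALLING_TIMEOUT_DEFAULT; }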
134 | | /** Maximum depth of blocks we're willing to serve as compact blocks to peers |
135 | | * when requested. For older blocks, a regular BLOCK response will be sent. */ |
136 | | static const int MAX_CMPCTBLOCK_DEPTH = 5; |
137 | | /** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */ |
138 | | static const int MAX_BLOCKTXN_DEPTH = 10; |
139 | | static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high"); |
140 | | /** Size of the "block download window": how far ahead of our current height do we fetch? |
141 | | * Larger windows tolerate larger download speed differences between peers, but increase the potential |
142 | | * degree of disordering of blocks on disk (which makes reindexing and pruning harder). We'll probably |
143 | | * want to make this a per-peer adaptive value at some point. */ |
144 | | static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024; |
145 | | /** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */ |
146 | | static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1; |
147 | | /** Additional block download timeout per parallel downloading peer (i.e. 5 min) */ |
148 | | static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5; |
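[Editor's sketch] As a formula, with a 10min block interval the defaults give a 10min base plus 5min per parallel downloading peer. A sketch under that reading (illustrative helper, not the actual code):

    #include <chrono>
    #include <cstdint>

    // Sketch: timeout = block_interval * (BASE + PER_PEER * parallel_peers).
    std::chrono::seconds BlockDownloadTimeout(std::chrono::seconds block_interval, int parallel_peers)
    {
        const double multiplier{BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * parallel_peers};
        return std::chrono::seconds{static_cast<int64_t>(block_interval.count() * multiplier)};
    }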
149 | | /** Maximum number of headers to announce when relaying blocks with headers message.*/ |
150 | | static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8; |
151 | | /** Minimum blocks required to signal NODE_NETWORK_LIMITED */ |
152 | | static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288; |
153 | | /** Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers */ |
154 | | static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144; |
155 | | /** Average delay between local address broadcasts */ |
156 | | static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h}; |
157 | | /** Average delay between peer address broadcasts */ |
158 | | static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s}; |
159 | | /** Delay between rotating the peers we relay a particular address to */ |
160 | | static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h}; |
161 | | /** Average delay between trickled inventory transmissions for inbound peers. |
162 | | * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ |
163 | | static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s}; |
164 | | /** Average delay between trickled inventory transmissions for outbound peers. |
165 | | * Use a smaller delay as there is less privacy concern for them. |
166 | | * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ |
167 | | static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s}; |
168 | | /** Maximum rate of inventory items to send per second. |
169 | | * Limits the impact of low-fee transaction floods. */ |
170 | | static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7; |
171 | | /** Target number of tx inventory items to send per transmission. */ |
172 | | static constexpr unsigned int INVENTORY_BROADCAST_TARGET = INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL); |
173 | | /** Maximum number of inventory items to send per transmission. */ |
174 | | static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000; |
175 | | static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low"); |
176 | | static_assert(INVENTORY_BROADCAST_MAX <= node::MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high"); |
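[Editor's note] Worked out with the defaults: INVENTORY_BROADCAST_TARGET = 7 items/s × count_seconds(5s) = 35 items per inbound trickle. The static_asserts keep this target at or below the 1000-item hard cap, which in turn must not exceed node::MAX_PEER_TX_ANNOUNCEMENTS.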
177 | | /** Average delay between feefilter broadcasts in seconds. */ |
178 | | static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min}; |
179 | | /** Maximum feefilter broadcast delay after significant change. */ |
180 | | static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min}; |
181 | | /** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */ |
182 | | static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000; |
183 | | /** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */ |
184 | | static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000; |
185 | | /** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */ |
186 | | static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23; |
187 | | /** The maximum number of address records permitted in an ADDR message. */ |
188 | | static constexpr size_t MAX_ADDR_TO_SEND{1000}; |
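[Editor's sketch] These two bounds jointly size a getaddr reply; a sketch of the rule, assuming the response is capped by the smaller of the absolute and percentage limits (hypothetical helper):

    #include <algorithm>
    #include <cstddef>

    // Sketch: return at most 23% of addrman, and never more than 1000 records.
    size_t MaxGetAddrResponseSize(size_t addrman_size)
    {
        return std::min(MAX_ADDR_TO_SEND, addrman_size * MAX_PCT_ADDR_TO_SEND / 100);
    }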
189 | | /** The maximum rate of address records we're willing to process on average. Can be bypassed using |
190 | | * the NetPermissionFlags::Addr permission. */ |
191 | | static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1}; |
192 | | /** The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND |
193 | | * based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR |
194 | | * is exempt from this limit). */ |
195 | | static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND}; |
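[Editor's sketch] The addr rate limit is a classic token bucket: tokens refill at MAX_ADDR_RATE_PER_SECOND up to the soft cap, each processed address spends one token, and a GETADDR response tops the bucket up past the cap. A self-contained sketch of that mechanism (field and function names are illustrative):

    #include <algorithm>

    struct AddrTokenBucket {
        double tokens{1.0}; // start at 1 to permit self-announcement

        // Refill for elapsed seconds; regular increments never exceed the soft cap.
        void Refill(double elapsed_seconds)
        {
            if (tokens < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
                tokens = std::min<double>(tokens + MAX_ADDR_RATE_PER_SECOND * elapsed_seconds,
                                          MAX_ADDR_PROCESSING_TOKEN_BUCKET);
            }
        }
        // A reply to our GETADDR earns a one-off exemption from the soft cap.
        void OnGetAddrResponse() { tokens += MAX_ADDR_TO_SEND; }
        // Spend one token per address; addresses without a token are rate-limited.
        bool Spend() { if (tokens < 1.0) return false; tokens -= 1.0; return true; }
    };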
196 | | /** The compactblocks version we support. See BIP 152. */ |
197 | | static constexpr uint64_t CMPCTBLOCKS_VERSION{2}; |
198 | | |
199 | | // Internal stuff |
200 | | namespace { |
201 | | /** Blocks that are in flight, and that are in the queue to be downloaded. */ |
202 | | struct QueuedBlock { |
203 | | /** BlockIndex. We must have this since we only request blocks when we've already validated the header. */ |
204 | | const CBlockIndex* pindex; |
205 | | /** Optional, used for CMPCTBLOCK downloads */ |
206 | | std::unique_ptr<PartiallyDownloadedBlock> partialBlock; |
207 | | }; |
208 | | |
209 | | /** |
210 | | * Data structure for an individual peer. This struct is not protected by |
211 | | * cs_main since it does not contain validation-critical data. |
212 | | * |
213 | | * Memory is owned by shared pointers and this object is destructed when |
214 | | * the refcount drops to zero. |
215 | | * |
216 | | * Mutexes inside this struct must not be held when locking m_peer_mutex. |
217 | | * |
218 | | * TODO: move most members from CNodeState to this structure. |
219 | | * TODO: move remaining application-layer data members from CNode to this structure. |
220 | | */ |
221 | | struct Peer { |
222 | | /** Same id as the CNode object for this peer */ |
223 | | const NodeId m_id{0}; |
224 | | |
225 | | /** Services we offered to this peer. |
226 | | * |
227 | | * This is supplied by CConnman during peer initialization. It's const |
228 | | * because there is no protocol defined for renegotiating services |
229 | | * initially offered to a peer. The set of local services we offer should |
230 | | * not change after initialization. |
231 | | * |
232 | | * An interesting example of this is NODE_NETWORK and initial block |
233 | | * download: a node which starts up from scratch doesn't have any blocks |
234 | | * to serve, but still advertises NODE_NETWORK because it will eventually |
235 | | * fulfill this role after IBD completes. P2P code is written in such a |
236 | | * way that it can gracefully handle peers who don't make good on their |
237 | | * service advertisements. */ |
238 | | const ServiceFlags m_our_services; |
239 | | /** Services this peer offered to us. */ |
240 | | std::atomic<ServiceFlags> m_their_services{NODE_NONE}; |
241 | | |
242 | | //! Whether this peer is an inbound connection |
243 | | const bool m_is_inbound; |
244 | | |
245 | | /** Protects misbehavior data members */ |
246 | | Mutex m_misbehavior_mutex; |
247 | | /** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */ |
248 | | bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false}; |
249 | | |
250 | | /** Protects block inventory data members */ |
251 | | Mutex m_block_inv_mutex; |
252 | | /** List of blocks that we'll announce via an `inv` message. |
253 | | * There is no final sorting before sending, as they are always sent |
254 | | * immediately and in the order requested. */ |
255 | | std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex); |
256 | | /** Unfiltered list of blocks that we'd like to announce via a `headers` |
257 | | * message. If we can't announce via a `headers` message, we'll fall back to |
258 | | * announcing via `inv`. */ |
259 | | std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex); |
260 | | /** The final block hash that we sent in an `inv` message to this peer. |
261 | | * When the peer requests this block, we send an `inv` message to trigger |
262 | | * the peer to request the next sequence of block hashes. |
263 | | * Most peers use headers-first syncing, which doesn't use this mechanism */ |
264 | | uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {}; |
265 | | |
266 | | /** Set to true once initial VERSION message was sent (only relevant for outbound peers). */ |
267 | | bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
268 | | |
269 | | /** This peer's reported block height when we connected */ |
270 | | std::atomic<int> m_starting_height{-1}; |
271 | | |
272 | | /** The pong reply we're expecting, or 0 if no pong expected. */ |
273 | | std::atomic<uint64_t> m_ping_nonce_sent{0}; |
274 | | /** When the last ping was sent, or 0 if no ping was ever sent */ |
275 | | std::atomic<std::chrono::microseconds> m_ping_start{0us}; |
276 | | /** Whether a ping has been requested by the user */ |
277 | | std::atomic<bool> m_ping_queued{false}; |
278 | | |
279 | | /** Whether this peer relays txs via wtxid */ |
280 | | std::atomic<bool> m_wtxid_relay{false}; |
281 | | /** The feerate in the most recent BIP133 `feefilter` message sent to the peer. |
282 | | * It is *not* a p2p protocol violation for the peer to send us |
283 | | * transactions with a lower fee rate than this. See BIP133. */ |
284 | | CAmount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; |
285 | | /** Timestamp after which we will send the next BIP133 `feefilter` message |
286 | | * to the peer. */ |
287 | | std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; |
288 | | |
289 | | struct TxRelay { |
290 | | mutable RecursiveMutex m_bloom_filter_mutex; |
291 | | /** Whether we relay transactions to this peer. */ |
292 | | bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false}; |
293 | | /** A bloom filter for which transactions to announce to the peer. See BIP37. */ |
294 | | std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr}; |
295 | | |
296 | | mutable RecursiveMutex m_tx_inventory_mutex; |
297 | | /** A filter of all the (w)txids that the peer has announced to |
298 | | * us or we have announced to the peer. We use this to avoid announcing |
299 | | * the same (w)txid to a peer that already has the transaction. */ |
300 | | CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001}; |
301 | | /** Set of transaction ids we still have to announce (txid for |
302 | | * non-wtxid-relay peers, wtxid for wtxid-relay peers). We use the |
303 | | * mempool to sort transactions in dependency order before relay, so |
304 | | * this does not have to be sorted. */ |
305 | | std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex); |
306 | | /** Whether the peer has requested us to send our complete mempool. Only |
307 | | * permitted if the peer has NetPermissionFlags::Mempool or we advertise |
308 | | * NODE_BLOOM. See BIP35. */ |
309 | | bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false}; |
310 | | /** The next time after which we will send an `inv` message containing |
311 | | * transaction announcements to this peer. */ |
312 | | std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0}; |
313 | | /** The mempool sequence num at which we sent the last `inv` message to this peer. |
314 | | * Can relay txs with lower sequence numbers than this (see CTxMempool::info_for_relay). */ |
315 | | uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1}; |
316 | | |
317 | | /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */ |
318 | | std::atomic<CAmount> m_fee_filter_received{0}; |
319 | | }; |
320 | | |
321 | | /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */ |
322 | | TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) |
323 | 88.7k | { |
324 | 88.7k | LOCK(m_tx_relay_mutex); |
325 | 88.7k | Assume(!m_tx_relay); |
326 | 88.7k | m_tx_relay = std::make_unique<Peer::TxRelay>(); |
327 | 88.7k | return m_tx_relay.get(); |
328 | 88.7k | }; |
329 | | |
330 | | TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) |
331 | 43.5M | { |
332 | 43.5M | return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get()); |
333 | 43.5M | }; |
334 | | |
335 | | /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */ |
336 | | std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
337 | | /** Probabilistic filter to track recent addr messages relayed with this |
338 | | * peer. Used to avoid relaying redundant addresses to this peer. |
339 | | * |
340 | | * We initialize this filter for outbound peers (other than |
341 | | * block-relay-only connections) or when an inbound peer sends us an |
342 | | * address related message (ADDR, ADDRV2, GETADDR). |
343 | | * |
344 | | * Presence of this filter must correlate with m_addr_relay_enabled. |
345 | | **/ |
346 | | std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
347 | | /** Whether we are participating in address relay with this connection. |
348 | | * |
349 | | * We set this bool to true for outbound peers (other than |
350 | | * block-relay-only connections), or when an inbound peer sends us an |
351 | | * address related message (ADDR, ADDRV2, GETADDR). |
352 | | * |
353 | | * We use this bool to decide whether a peer is eligible for gossiping |
354 | | * addr messages. This avoids relaying to peers that are unlikely to |
355 | | * forward them, effectively blackholing self announcements. Reasons |
356 | | * peers might not support addr relay on the link include that they connected |
357 | | * to us as a block-relay-only peer or they are a light client. |
358 | | * |
359 | | * This field must correlate with whether m_addr_known has been |
360 | | * initialized.*/ |
361 | | std::atomic_bool m_addr_relay_enabled{false}; |
362 | | /** Whether a getaddr request to this peer is outstanding. */ |
363 | | bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
364 | | /** Guards address sending timers. */ |
365 | | mutable Mutex m_addr_send_times_mutex; |
366 | | /** Time point to send the next ADDR message to this peer. */ |
367 | | std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0}; |
368 | | /** Time point to possibly re-announce our local address to this peer. */ |
369 | | std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0}; |
370 | | /** Whether the peer has signaled support for receiving ADDRv2 (BIP155) |
371 | | * messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */ |
372 | | std::atomic_bool m_wants_addrv2{false}; |
373 | | /** Whether this peer has already sent us a getaddr message. */ |
374 | | bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
375 | | /** Number of addresses that can be processed from this peer. Start at 1 to |
376 | | * permit self-announcement. */ |
377 | | double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0}; |
378 | | /** When m_addr_token_bucket was last updated */ |
379 | | std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()}; |
380 | | /** Total number of addresses that were dropped due to rate limiting. */ |
381 | | std::atomic<uint64_t> m_addr_rate_limited{0}; |
382 | | /** Total number of addresses that were processed (excludes rate-limited ones). */ |
383 | | std::atomic<uint64_t> m_addr_processed{0}; |
384 | | |
385 | | /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */ |
386 | | bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
387 | | |
388 | | /** Protects m_getdata_requests **/ |
389 | | Mutex m_getdata_requests_mutex; |
390 | | /** Work queue of items requested by this peer **/ |
391 | | std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex); |
392 | | |
393 | | /** Time of the last getheaders message to this peer */ |
394 | | NodeClock::time_point m_last_getheaders_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){}; |
395 | | |
396 | | /** Protects m_headers_sync **/ |
397 | | Mutex m_headers_sync_mutex; |
398 | | /** Headers-sync state for this peer (eg for initial sync, or syncing large |
399 | | * reorgs) **/ |
400 | | std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {}; |
401 | | |
402 | | /** Whether we've sent our peer a sendheaders message. **/ |
403 | | std::atomic<bool> m_sent_sendheaders{false}; |
404 | | |
405 | | /** When to potentially disconnect peer for stalling headers download */ |
406 | | std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us}; |
407 | | |
408 | | /** Whether this peer wants invs or headers (when possible) for block announcements */ |
409 | | bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
410 | | |
411 | | /** Time offset computed during the version handshake based on the |
412 | | * timestamp the peer sent in the version message. */ |
413 | | std::atomic<std::chrono::seconds> m_time_offset{0s}; |
414 | | |
415 | | explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound) |
416 | 88.7k | : m_id{id} |
417 | 88.7k | , m_our_services{our_services} |
418 | 88.7k | , m_is_inbound{is_inbound} |
419 | 88.7k | {} |
420 | | |
421 | | private: |
422 | | mutable Mutex m_tx_relay_mutex; |
423 | | |
424 | | /** Transaction relay data. May be a nullptr. */ |
425 | | std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex); |
426 | | }; |
427 | | |
428 | | using PeerRef = std::shared_ptr<Peer>; |
429 | | |
430 | | /** |
431 | | * Maintain validation-specific state about nodes, protected by cs_main, instead |
432 | | * of by CNode's own locks. This simplifies asynchronous operation, where |
433 | | * processing of incoming data is done after the ProcessMessage call returns, |
434 | | * and we're no longer holding the node's locks. |
435 | | */ |
436 | | struct CNodeState { |
437 | | //! The best known block we know this peer has announced. |
438 | | const CBlockIndex* pindexBestKnownBlock{nullptr}; |
439 | | //! The hash of the last unknown block this peer has announced. |
440 | | uint256 hashLastUnknownBlock{}; |
441 | | //! The last full block we both have. |
442 | | const CBlockIndex* pindexLastCommonBlock{nullptr}; |
443 | | //! The best header we have sent our peer. |
444 | | const CBlockIndex* pindexBestHeaderSent{nullptr}; |
445 | | //! Whether we've started headers synchronization with this peer. |
446 | | bool fSyncStarted{false}; |
447 | | //! Since when we're stalling block download progress (in microseconds), or 0. |
448 | | std::chrono::microseconds m_stalling_since{0us}; |
449 | | std::list<QueuedBlock> vBlocksInFlight; |
450 | | //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. |
451 | | std::chrono::microseconds m_downloading_since{0us}; |
452 | | //! Whether we consider this a preferred download peer. |
453 | | bool fPreferredDownload{false}; |
454 | | /** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */ |
455 | | bool m_requested_hb_cmpctblocks{false}; |
456 | | /** Whether this peer will send us cmpctblocks if we request them. */ |
457 | | bool m_provides_cmpctblocks{false}; |
458 | | |
459 | | /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic. |
460 | | * |
461 | | * Both are only in effect for outbound, non-manual, non-protected connections. |
462 | | * Any peer protected (m_protect = true) is not chosen for eviction. A peer is |
463 | | * marked as protected if all of these are true: |
464 | | * - its connection type is IsBlockOnlyConn() == false |
465 | | * - it gave us a valid connecting header |
466 | | * - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet |
467 | | * - its chain tip has at least as much work as ours |
468 | | * |
469 | | * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip, |
470 | | * set a timeout CHAIN_SYNC_TIMEOUT in the future: |
471 | | * - If at timeout their best known block now has more work than our tip |
472 | | * when the timeout was set, then either reset the timeout or clear it |
473 | | * (after comparing against our current tip's work) |
474 | | * - If at timeout their best known block still has less work than our |
475 | | * tip did when the timeout was set, then send a getheaders message, |
476 | | * and set a shorter timeout, HEADERS_RESPONSE_TIME in the future. |
477 | | * If their best known block is still behind when that new timeout is |
478 | | * reached, disconnect. |
479 | | * |
480 | | * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers, |
481 | | * drop the outbound one that least recently announced us a new block. |
482 | | */ |
483 | | struct ChainSyncTimeoutState { |
484 | | //! A timeout used for checking whether our peer has sufficiently synced |
485 | | std::chrono::seconds m_timeout{0s}; |
486 | | //! A header with the work we require on our peer's chain |
487 | | const CBlockIndex* m_work_header{nullptr}; |
488 | | //! After timeout is reached, set to true after sending getheaders |
489 | | bool m_sent_getheaders{false}; |
490 | | //! Whether this peer is protected from disconnection due to a bad/slow chain |
491 | | bool m_protect{false}; |
492 | | }; |
493 | | |
494 | | ChainSyncTimeoutState m_chain_sync; |
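[Editor's sketch] The comment above is effectively a small state machine over these four fields. A condensed sketch of the transitions, with simplified parameters standing in for the real tip/work comparisons (not the actual ConsiderEviction body):

    #include <chrono>
    using namespace std::chrono_literals;

    // Sketch: evaluate one unprotected outbound peer.
    void CheckChainSyncSketch(ChainSyncTimeoutState& s, std::chrono::seconds now,
                              bool peer_has_enough_work, const CBlockIndex* our_tip,
                              bool& send_getheaders, bool& disconnect)
    {
        if (peer_has_enough_work) {
            // Peer caught up: clear any pending timeout.
            s.m_timeout = 0s; s.m_work_header = nullptr; s.m_sent_getheaders = false;
        } else if (s.m_timeout == 0s) {
            // Arm the CHAIN_SYNC_TIMEOUT against our current tip's work.
            s.m_timeout = now + CHAIN_SYNC_TIMEOUT; s.m_work_header = our_tip;
        } else if (now > s.m_timeout && !s.m_sent_getheaders) {
            // One last chance: ask for headers and wait HEADERS_RESPONSE_TIME.
            send_getheaders = true; s.m_sent_getheaders = true;
            s.m_timeout = now + HEADERS_RESPONSE_TIME;
        } else if (now > s.m_timeout) {
            disconnect = true; // still behind after the grace period
        }
    }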
495 | | |
496 | | //! Time of last new block announcement |
497 | | int64_t m_last_block_announcement{0}; |
498 | | }; |
499 | | |
500 | | class PeerManagerImpl final : public PeerManager |
501 | | { |
502 | | public: |
503 | | PeerManagerImpl(CConnman& connman, AddrMan& addrman, |
504 | | BanMan* banman, ChainstateManager& chainman, |
505 | | CTxMemPool& pool, node::Warnings& warnings, Options opts); |
506 | | |
507 | | /** Overridden from CValidationInterface. */ |
508 | | void ActiveTipChange(const CBlockIndex& new_tip, bool) override |
509 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
510 | | void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override |
511 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
512 | | void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override |
513 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
514 | | void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override |
515 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
516 | | void BlockChecked(const CBlock& block, const BlockValidationState& state) override |
517 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
518 | | void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override |
519 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex); |
520 | | |
521 | | /** Implement NetEventsInterface */ |
522 | | void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex); |
523 | | void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex); |
524 | | bool HasAllDesirableServiceFlags(ServiceFlags services) const override; |
525 | | bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override |
526 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
527 | | bool SendMessages(CNode* pto) override |
528 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
529 | | |
530 | | /** Implement PeerManager */ |
531 | | void StartScheduledTasks(CScheduler& scheduler) override; |
532 | | void CheckForStaleTipAndEvictPeers() override; |
533 | | std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override |
534 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
535 | | bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
536 | | std::vector<TxOrphanage::OrphanTxBase> GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
537 | | PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
538 | | void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
539 | | void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
540 | | void SetBestBlock(int height, std::chrono::seconds time) override |
541 | 2.24M | { |
542 | 2.24M | m_best_height = height; |
543 | 2.24M | m_best_block_time = time; |
544 | 2.24M | }; |
545 | 0 | void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); }; |
546 | | void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv, |
547 | | const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override |
548 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
549 | | void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override; |
550 | | ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override; |
551 | | |
552 | | private: |
553 | | /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */ |
554 | | void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex); |
555 | | |
556 | | /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */ |
557 | | void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
558 | | |
559 | | /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */ |
560 | | void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
561 | | |
562 | | /** Get a shared pointer to the Peer object. |
563 | | * May return an empty shared_ptr if the Peer object can't be found. */ |
564 | | PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
565 | | |
566 | | /** Get a shared pointer to the Peer object and remove it from m_peer_map. |
567 | | * May return an empty shared_ptr if the Peer object can't be found. */ |
568 | | PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
569 | | |
570 | | /** Mark a peer as misbehaving, which will cause it to be disconnected and its |
571 | | * address discouraged. */ |
572 | | void Misbehaving(Peer& peer, const std::string& message); |
573 | | |
574 | | /** |
575 | | * Potentially mark a node discouraged based on the contents of a BlockValidationState object |
576 | | * |
577 | | * @param[in] via_compact_block this bool is passed in because net_processing should |
578 | | * punish peers differently depending on whether the data was provided in a compact |
579 | | * block message or not. If the compact block had a valid header, but contained invalid |
580 | | * txs, the peer should not be punished. See BIP 152. |
581 | | */ |
582 | | void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, |
583 | | bool via_compact_block, const std::string& message = "") |
584 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
585 | | |
586 | | /** |
587 | | * Potentially disconnect and discourage a node based on the contents of a TxValidationState object |
588 | | */ |
589 | | void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) |
590 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
591 | | |
592 | | /** Maybe disconnect a peer and discourage future connections from its address. |
593 | | * |
594 | | * @param[in] pnode The node to check. |
595 | | * @param[in] peer The peer object to check. |
596 | | * @return True if the peer was marked for disconnection in this function |
597 | | */ |
598 | | bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); |
599 | | |
600 | | /** Handle a transaction whose result was not MempoolAcceptResult::ResultType::VALID. |
601 | | * @param[in] first_time_failure Whether we should consider inserting into vExtraTxnForCompact, adding |
602 | | * a new orphan to resolve, or looking for a package to submit. |
603 | | * Set to true for transactions just received over p2p. |
604 | | * Set to false if the tx has already been rejected before, |
605 | | * e.g. is already in the orphanage, to avoid adding duplicate entries. |
606 | | * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. |
607 | | * |
608 | | * @returns a PackageToValidate if this transaction has a reconsiderable failure and an eligible package was found, |
609 | | * or std::nullopt otherwise. |
610 | | */ |
611 | | std::optional<node::PackageToValidate> ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result, |
612 | | bool first_time_failure) |
613 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
614 | | |
615 | | /** Handle a transaction whose result was MempoolAcceptResult::ResultType::VALID. |
616 | | * Updates m_txrequest, m_orphanage, and vExtraTxnForCompact. Also queues the tx for relay. */ |
617 | | void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions) |
618 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
619 | | |
620 | | /** Handle the results of package validation: calls ProcessValidTx and ProcessInvalidTx for |
621 | | * individual transactions, and caches rejection for the package as a group. |
622 | | */ |
623 | | void ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) |
624 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
625 | | |
626 | | /** |
627 | | * Reconsider orphan transactions after a parent has been accepted to the mempool. |
628 | | * |
629 | | * @param[in] peer The peer whose orphan transactions we will reconsider. Generally only |
630 | | * one orphan will be reconsidered on each call of this function. If an |
631 | | * accepted orphan has orphaned children, those will need to be |
632 | | * reconsidered, creating more work, possibly for other peers. |
633 | | * @return True if meaningful work was done (an orphan was accepted/rejected). |
634 | | * If no meaningful work was done, then the work set for this peer |
635 | | * will be empty. |
636 | | */ |
637 | | bool ProcessOrphanTx(Peer& peer) |
638 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
639 | | |
640 | | /** Process a single headers message from a peer. |
641 | | * |
642 | | * @param[in] pfrom CNode of the peer |
643 | | * @param[in] peer The peer sending us the headers |
644 | | * @param[in] headers The headers received. Note that this may be modified within ProcessHeadersMessage. |
645 | | * @param[in] via_compact_block Whether this header came in via compact block handling. |
646 | | */ |
647 | | void ProcessHeadersMessage(CNode& pfrom, Peer& peer, |
648 | | std::vector<CBlockHeader>&& headers, |
649 | | bool via_compact_block) |
650 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
651 | | /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */ |
652 | | /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */ |
653 | | bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer); |
654 | | /** Calculate an anti-DoS work threshold for headers chains */ |
655 | | arith_uint256 GetAntiDoSWorkThreshold(); |
656 | | /** Deal with state tracking and headers sync for peers that send |
657 | | * non-connecting headers (this can happen due to BIP 130 headers |
658 | | * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */ |
659 | | void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
660 | | /** Return true if the headers connect to each other, false otherwise */ |
661 | | bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const; |
662 | | /** Try to continue a low-work headers sync that has already begun. |
663 | | * Assumes the caller has already verified the headers connect, and has |
664 | | * checked that each header satisfies the proof-of-work target included in |
665 | | * the header. |
666 | | * @param[in] peer The peer we're syncing with. |
667 | | * @param[in] pfrom CNode of the peer |
668 | | * @param[in,out] headers The headers to be processed. |
669 | | * @return True if the passed in headers were successfully processed |
670 | | * as the continuation of a low-work headers sync in progress; |
671 | | * false otherwise. |
672 | | * If false, the passed in headers will be returned back to |
673 | | * the caller. |
674 | | * If true, the returned headers may be empty, indicating |
675 | | * there is no more work for the caller to do; or the headers |
676 | | * may be populated with entries that have passed anti-DoS |
677 | | * checks (and therefore may be validated for block index |
678 | | * acceptance by the caller). |
679 | | */ |
680 | | bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, |
681 | | std::vector<CBlockHeader>& headers) |
682 | | EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
683 | | /** Check work on a headers chain to be processed, and if insufficient, |
684 | | * initiate our anti-DoS headers sync mechanism. |
685 | | * |
686 | | * @param[in] peer The peer whose headers we're processing. |
687 | | * @param[in] pfrom CNode of the peer |
688 | | * @param[in] chain_start_header Where these headers connect in our index. |
689 | | * @param[in,out] headers The headers to be processed. |
690 | | * |
691 | | * @return True if chain was low work (headers will be empty after |
692 | | * calling); false otherwise. |
693 | | */ |
694 | | bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, |
695 | | const CBlockIndex* chain_start_header, |
696 | | std::vector<CBlockHeader>& headers) |
697 | | EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
698 | | |
699 | | /** Return true if the given header is an ancestor of |
700 | | * m_chainman.m_best_header or our current tip */ |
701 | | bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
702 | | |
703 | | /** Request further headers from this peer with a given locator. |
704 | | * We don't issue a getheaders message if we have a recent one outstanding. |
705 | | * This returns true if a getheaders is actually sent, and false otherwise. |
706 | | */ |
707 | | bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
708 | | /** Potentially fetch blocks from this peer upon receipt of a new headers tip */ |
709 | | void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header); |
710 | | /** Update peer state based on received headers message */ |
711 | | void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers) |
712 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
713 | | |
714 | | void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req); |
715 | | |
716 | | /** Send a message to a peer */ |
717 | 7.74k | void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); } |
718 | | template <typename... Args> |
719 | | void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const |
720 | 12.9M | { |
721 | 12.9M | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); |
722 | 12.9M | } |
[llvm-cov per-template-instantiation coverage tables for MakeAndPushMessage elided: 14 executed instantiations with call counts from 103 to 9.98M, plus 9 unexecuted instantiations.]
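[Editor's sketch] For reference, the helper above is variadic over the payload: callers pass the message type plus the fields to serialize, and NetMsg::Make handles serialization. A typical call, modeled on the ping-sending code elsewhere in this file (node_to stands for some connected CNode&):

    // Sketch: send a BIP31 ping whose single payload field is a random nonce.
    uint64_t nonce{FastRandomContext().rand64()};
    MakeAndPushMessage(node_to, NetMsgType::PING, nonce);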
723 | | |
724 | | /** Send a version message to a peer */ |
725 | | void PushNodeVersion(CNode& pnode, const Peer& peer); |
726 | | |
727 | | /** Send a ping message every PING_INTERVAL or if requested via RPC. May |
728 | | * mark the peer to be disconnected if a ping has timed out. |
729 | | * We use mockable time for ping timeouts, so setmocktime may cause pings |
730 | | * to time out. */ |
731 | | void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now); |
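[Editor's sketch] A condensed sketch of the schedule this declaration implies, using the Peer fields declared earlier and `now` in microseconds (TIMEOUT_INTERVAL comes from net.h; this is not the actual body):

    // Sketch: disconnect on an expired ping, otherwise ping when due.
    const bool ping_outstanding{peer.m_ping_nonce_sent != 0};
    if (ping_outstanding && now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) {
        node_to.fDisconnect = true; // pong never arrived
    } else if (peer.m_ping_queued || now > peer.m_ping_start.load() + PING_INTERVAL) {
        // time to send a fresh ping (see the MakeAndPushMessage example above)
    }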
732 | | |
733 | | /** Send `addr` messages on a regular schedule. */ |
734 | | void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
735 | | |
736 | | /** Send a single `sendheaders` message, after we have completed headers sync with a peer. */ |
737 | | void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
738 | | |
739 | | /** Relay (gossip) an address to a few randomly chosen nodes. |
740 | | * |
741 | | * @param[in] originator The id of the peer that sent us the address. We don't want to relay it back. |
742 | | * @param[in] addr Address to relay. |
743 | | * @param[in] fReachable Whether the address' network is reachable. We relay unreachable |
744 | | * addresses less. |
745 | | */ |
746 | | void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); |
747 | | |
748 | | /** Send `feefilter` message. */ |
749 | | void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
750 | | |
751 | | FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
752 | | |
753 | | FeeFilterRounder m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
754 | | |
755 | | const CChainParams& m_chainparams; |
756 | | CConnman& m_connman; |
757 | | AddrMan& m_addrman; |
758 | | /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ |
759 | | BanMan* const m_banman; |
760 | | ChainstateManager& m_chainman; |
761 | | CTxMemPool& m_mempool; |
762 | | |
763 | | /** Synchronizes tx download including TxRequestTracker, rejection filters, and TxOrphanage. |
764 | | * Lock invariants: |
765 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_orphanage. |
766 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects. |
767 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects_reconsiderable. |
768 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_confirmed_transactions. |
769 | | * - Each data structure's limits hold (m_orphanage max size, m_txrequest per-peer limits, etc). |
770 | | */ |
771 | | Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs); |
772 | | node::TxDownloadManager m_txdownloadman GUARDED_BY(m_tx_download_mutex); |
773 | | |
774 | | std::unique_ptr<TxReconciliationTracker> m_txreconciliation; |
775 | | |
776 | | /** The height of the best chain */ |
777 | | std::atomic<int> m_best_height{-1}; |
778 | | /** The time of the best chain tip block */ |
779 | | std::atomic<std::chrono::seconds> m_best_block_time{0s}; |
780 | | |
781 | | /** Next time to check for stale tip */ |
782 | | std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s}; |
783 | | |
784 | | node::Warnings& m_warnings; |
785 | | TimeOffsets m_outbound_time_offsets{m_warnings}; |
786 | | |
787 | | const Options m_opts; |
788 | | |
789 | | bool RejectIncomingTxs(const CNode& peer) const; |
790 | | |
791 | | /** Whether we've completed initial sync yet, for determining when to turn |
792 | | * on extra block-relay-only peers. */ |
793 | | bool m_initial_sync_finished GUARDED_BY(cs_main){false}; |
794 | | |
795 | | /** Protects m_peer_map. This mutex must not be locked while holding a lock |
796 | | * on any of the mutexes inside a Peer object. */ |
797 | | mutable Mutex m_peer_mutex; |
798 | | /** |
799 | | * Map of all Peer objects, keyed by peer id. This map is protected |
800 | | * by the m_peer_mutex. Once a shared pointer reference is |
801 | | * taken, the lock may be released. Individual fields are protected by |
802 | | * their own locks. |
803 | | */ |
804 | | std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex); |
805 | | |
806 | | /** Map maintaining per-node state. */ |
807 | | std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main); |
808 | | |
809 | | /** Get a pointer to a const CNodeState, used when not mutating the CNodeState object. */ |
810 | | const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
811 | | /** Get a pointer to a mutable CNodeState. */ |
812 | | CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
813 | | |
814 | | uint32_t GetFetchFlags(const Peer& peer) const; |
815 | | |
816 | | std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us}; |
817 | | |
818 | | /** Number of nodes with fSyncStarted. */ |
819 | | int nSyncStarted GUARDED_BY(cs_main) = 0; |
820 | | |
821 | | /** Hash of the last block we received via INV */ |
822 | | uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){}; |
823 | | |
824 | | /** |
 825 | | * Sources of received blocks, saved to be able to punish them when processing 
826 | | * happens afterwards. |
827 | | * Set mapBlockSource[hash].second to false if the node should not be |
828 | | * punished if the block is invalid. |
829 | | */ |
830 | | std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main); |
831 | | |
832 | | /** Number of peers with wtxid relay. */ |
833 | | std::atomic<int> m_wtxid_relay_peers{0}; |
834 | | |
835 | | /** Number of outbound peers with m_chain_sync.m_protect. */ |
836 | | int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; |
837 | | |
838 | | /** Number of preferable block download peers. */ |
839 | | int m_num_preferred_download_peers GUARDED_BY(cs_main){0}; |
840 | | |
841 | | /** Stalling timeout for blocks in IBD */ |
842 | | std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT}; |
843 | | |
844 | | /** |
845 | | * For sending `inv`s to inbound peers, we use a single (exponentially |
846 | | * distributed) timer for all peers. If we used a separate timer for each |
847 | | * peer, a spy node could make multiple inbound connections to us to |
848 | | * accurately determine when we received the transaction (and potentially |
849 | | * determine the transaction's origin). */ |
850 | | std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, |
851 | | std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
852 | | |
853 | | |
854 | | // All of the following cache a recent block, and are protected by m_most_recent_block_mutex |
855 | | Mutex m_most_recent_block_mutex; |
856 | | std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex); |
857 | | std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex); |
858 | | uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex); |
859 | | std::unique_ptr<const std::map<uint256, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex); |
860 | | |
861 | | // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates. |
862 | | /** Mutex guarding the other m_headers_presync_* variables. */ |
863 | | Mutex m_headers_presync_mutex; |
864 | | /** A type to represent statistics about a peer's low-work headers sync. |
865 | | * |
866 | | * - The first field is the total verified amount of work in that synchronization. |
867 | | * - The second is: |
868 | | * - nullopt: the sync is in REDOWNLOAD phase (phase 2). |
869 | | * - {height, timestamp}: the sync has the specified tip height and block timestamp (phase 1). |
870 | | */ |
871 | | using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>; |
872 | | /** Statistics for all peers in low-work headers sync. */ |
873 | | std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {}; |
874 | | /** The peer with the most-work entry in m_headers_presync_stats. */ |
875 | | NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1}; |
876 | | /** The m_headers_presync_stats improved, and needs signalling. */ |
877 | | std::atomic_bool m_headers_presync_should_signal{false}; |
878 | | |
879 | | /** Height of the highest block announced using BIP 152 high-bandwidth mode. */ |
880 | | int m_highest_fast_announce GUARDED_BY(::cs_main){0}; |
881 | | |
882 | | /** Have we requested this block from a peer */ |
883 | | bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
884 | | |
885 | | /** Have we requested this block from an outbound peer */ |
886 | | bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); |
887 | | |
888 | | /** Remove this block from our tracked requested blocks. Called if: |
889 | | * - the block has been received from a peer |
890 | | * - the request for the block has timed out |
891 | | * If "from_peer" is specified, then only remove the block if it is in |
 892 | | * flight from that peer (to prevent one peer's network traffic from 
893 | | * affecting another's state). |
894 | | */ |
895 | | void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
896 | | |
 897 | | /* Mark a block as in flight. 
 898 | | * Returns false (while still setting pit) if the block was already in flight from the same peer. 
 899 | | * pit will only remain valid as long as the same cs_main lock is held. 
900 | | */ |
901 | | bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
902 | | |
903 | | bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
904 | | |
905 | | /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has |
906 | | * at most count entries. |
907 | | */ |
908 | | void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
909 | | |
910 | | /** Request blocks for the background chainstate, if one is in use. */ |
911 | | void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
912 | | |
913 | | /** |
914 | | * \brief Find next blocks to download from a peer after a starting block. |
915 | | * |
916 | | * \param vBlocks Vector of blocks to download which will be appended to. |
917 | | * \param peer Peer which blocks will be downloaded from. |
918 | | * \param state Pointer to the state of the peer. |
919 | | * \param pindexWalk Pointer to the starting block to add to vBlocks. |
920 | | * \param count Maximum number of blocks to allow in vBlocks. No more |
921 | | * blocks will be added if it reaches this size. |
922 | | * \param nWindowEnd Maximum height of blocks to allow in vBlocks. No |
923 | | * blocks will be added above this height. |
924 | | * \param activeChain Optional pointer to a chain to compare against. If |
925 | | * provided, any next blocks which are already contained |
926 | | * in this chain will not be appended to vBlocks, but |
927 | | * instead will be used to update the |
928 | | * state->pindexLastCommonBlock pointer. |
929 | | * \param nodeStaller Optional pointer to a NodeId variable that will receive |
930 | | * the ID of another peer that might be causing this peer |
931 | | * to stall. This is set to the ID of the peer which |
932 | | * first requested the first in-flight block in the |
933 | | * download window. It is only set if vBlocks is empty at |
934 | | * the end of this function call and if increasing |
935 | | * nWindowEnd by 1 would cause it to be non-empty (which |
936 | | * indicates the download might be stalled because every |
937 | | * block in the window is in flight and no other peer is |
938 | | * trying to download the next block). |
939 | | */ |
940 | | void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
941 | | |
942 | | /* Multimap used to preserve insertion order */ |
943 | | typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap; |
944 | | BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main); |
945 | | |
946 | | /** When our tip was last updated. */ |
947 | | std::atomic<std::chrono::seconds> m_last_tip_update{0s}; |
948 | | |
949 | | /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */ |
950 | | CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) |
951 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, NetEventsInterface::g_msgproc_mutex); |
952 | | |
953 | | void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) |
954 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex) |
955 | | LOCKS_EXCLUDED(::cs_main); |
956 | | |
957 | | /** Process a new block. Perform any post-processing housekeeping */ |
958 | | void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked); |
959 | | |
960 | | /** Process compact block txns */ |
961 | | void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions) |
962 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); |
963 | | |
964 | | /** |
965 | | * When a peer sends us a valid block, instruct it to announce blocks to us |
966 | | * using CMPCTBLOCK if possible by adding its nodeid to the end of |
967 | | * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by |
968 | | * removing the first element if necessary. |
969 | | */ |
970 | | void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); |
971 | | |
972 | | /** Stack of nodes which we have set to announce using compact blocks */ |
973 | | std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); |
974 | | |
975 | | /** Number of peers from which we're downloading blocks. */ |
976 | | int m_peers_downloading_from GUARDED_BY(cs_main) = 0; |
977 | | |
978 | | void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
979 | | |
980 | | /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. |
981 | | * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of |
982 | | * these are kept in a ring buffer */ |
983 | | std::vector<CTransactionRef> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex); |
984 | | /** Offset into vExtraTxnForCompact to insert the next tx */ |
985 | | size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0; |
986 | | |
987 | | /** Check whether the last unknown block a peer advertised is not yet known. */ |
988 | | void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
989 | | /** Update tracking information about which blocks a peer is assumed to have. */ |
990 | | void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
991 | | bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
992 | | |
993 | | /** |
994 | | * Estimates the distance, in blocks, between the best-known block and the network chain tip. |
995 | | * Utilizes the best-block time and the chainparams blocks spacing to approximate it. |
996 | | */ |
997 | | int64_t ApproximateBestBlockDepth() const; |
998 | | |
999 | | /** |
1000 | | * To prevent fingerprinting attacks, only send blocks/headers outside of |
1001 | | * the active chain if they are no more than a month older (both in time, |
1002 | | * and in best equivalent proof of work) than the best header chain we know |
 1003 | | * about, and only if we fully-validated them at some point. 
1004 | | */ |
1005 | | bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
1006 | | bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
1007 | | void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) |
1008 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); |
1009 | | |
1010 | | /** |
1011 | | * Validation logic for compact filters request handling. |
1012 | | * |
1013 | | * May disconnect from the peer in the case of a bad request. |
1014 | | * |
1015 | | * @param[in] node The node that we received the request from |
1016 | | * @param[in] peer The peer that we received the request from |
1017 | | * @param[in] filter_type The filter type the request is for. Must be basic filters. |
1018 | | * @param[in] start_height The start height for the request |
1019 | | * @param[in] stop_hash The stop_hash for the request |
1020 | | * @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157 |
1021 | | * @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced. |
1022 | | * @param[out] filter_index The filter index, if the request can be serviced. |
1023 | | * @return True if the request can be serviced. |
1024 | | */ |
1025 | | bool PrepareBlockFilterRequest(CNode& node, Peer& peer, |
1026 | | BlockFilterType filter_type, uint32_t start_height, |
1027 | | const uint256& stop_hash, uint32_t max_height_diff, |
1028 | | const CBlockIndex*& stop_index, |
1029 | | BlockFilterIndex*& filter_index); |
1030 | | |
1031 | | /** |
1032 | | * Handle a cfilters request. |
1033 | | * |
1034 | | * May disconnect from the peer in the case of a bad request. |
1035 | | * |
1036 | | * @param[in] node The node that we received the request from |
1037 | | * @param[in] peer The peer that we received the request from |
1038 | | * @param[in] vRecv The raw message received |
1039 | | */ |
1040 | | void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv); |
1041 | | |
1042 | | /** |
1043 | | * Handle a cfheaders request. |
1044 | | * |
1045 | | * May disconnect from the peer in the case of a bad request. |
1046 | | * |
1047 | | * @param[in] node The node that we received the request from |
1048 | | * @param[in] peer The peer that we received the request from |
1049 | | * @param[in] vRecv The raw message received |
1050 | | */ |
1051 | | void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv); |
1052 | | |
1053 | | /** |
1054 | | * Handle a getcfcheckpt request. |
1055 | | * |
1056 | | * May disconnect from the peer in the case of a bad request. |
1057 | | * |
1058 | | * @param[in] node The node that we received the request from |
1059 | | * @param[in] peer The peer that we received the request from |
1060 | | * @param[in] vRecv The raw message received |
1061 | | */ |
1062 | | void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv); |
1063 | | |
1064 | | /** Checks if address relay is permitted with peer. If needed, initializes |
1065 | | * the m_addr_known bloom filter and sets m_addr_relay_enabled to true. |
1066 | | * |
1067 | | * @return True if address relay is enabled with peer |
1068 | | * False if address relay is disallowed |
1069 | | */ |
1070 | | bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1071 | | |
1072 | | void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1073 | | void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1074 | | |
1075 | | void LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block); |
1076 | | }; |
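// [Editor's sketch, not part of the source] How the HeadersPresyncStats pairs
// declared in the class above can drive best-peer selection: std::pair compares
// lexicographically, so total verified work dominates and the optional
// {height, timestamp} (engaged only in phase 1) breaks ties. A uint64_t stands
// in for arith_uint256 here, and the selection loop is an illustration, not the
// exact logic used elsewhere in this file.
#include <cstdint>
#include <map>
#include <optional>
#include <utility>

using MockStats = std::pair<uint64_t, std::optional<std::pair<int64_t, uint32_t>>>;

int64_t BestPresyncPeer(const std::map<int64_t, MockStats>& stats)
{
    int64_t best_peer{-1};
    const MockStats* best{nullptr};
    for (const auto& [peer, stat] : stats) {
        if (best == nullptr || stat > *best) { // lexicographic: work compared first
            best = &stat;
            best_peer = peer;
        }
    }
    return best_peer;
}

int main()
{
    std::map<int64_t, MockStats> stats;
    stats[0] = {100, std::nullopt};                                     // less work
    stats[1] = {250, std::pair<int64_t, uint32_t>{800000, 1700000000}}; // most work, phase 1
    stats[2] = {250, std::nullopt};                                     // same work, phase 2
    return BestPresyncPeer(stats) == 1 ? 0 : 1;
}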
1077 | | |
1078 | | const CNodeState* PeerManagerImpl::State(NodeId pnode) const |
1079 | 268M | { |
1080 | 268M | std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode); |
1081 | 268M | if (it == m_node_states.end()) Branch (1081:9): [True: 110, False: 268M]
|
1082 | 110 | return nullptr; |
1083 | 268M | return &it->second; |
1084 | 268M | } |
1085 | | |
1086 | | CNodeState* PeerManagerImpl::State(NodeId pnode) |
1087 | 268M | { |
1088 | 268M | return const_cast<CNodeState*>(std::as_const(*this).State(pnode)); |
1089 | 268M | } |
1090 | | |
1091 | | /** |
1092 | | * Whether the peer supports the address. For example, a peer that does not |
 1093 | | * implement BIP155 cannot receive Tor v3 addresses, because those require 
 1094 | | * the ADDRv2 (BIP155) encoding. 
1095 | | */ |
1096 | | static bool IsAddrCompatible(const Peer& peer, const CAddress& addr) |
1097 | 811 | { |
1098 | 811 | return peer.m_wants_addrv2 || addr.IsAddrV1Compatible(); Branch (1098:12): [True: 0, False: 811]
Branch (1098:35): [True: 811, False: 0]
|
1099 | 811 | } |
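// [Editor's sketch, not part of the source] Why the check above exists: the
// legacy ADDR (v1) encoding fixes the address field at 16 bytes (an IPv6 slot),
// so a 32-byte Tor v3 key cannot be represented, while BIP155's ADDRv2 carries
// a network id plus variable-length address bytes. The names below are
// illustrative stand-ins, not the real CNetAddr API.
#include <cstddef>

enum class Net { IPV4, IPV6, TORV3 };

constexpr bool FitsInAddrV1(Net net, std::size_t addr_bytes)
{
    // V1 can only carry addresses that map into the fixed 16-byte IPv6 field.
    return net != Net::TORV3 && addr_bytes <= 16;
}

static_assert(FitsInAddrV1(Net::IPV6, 16));
static_assert(!FitsInAddrV1(Net::TORV3, 32)); // Tor v3 needs ADDRv2 (BIP155)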
1100 | | |
1101 | | void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr) |
1102 | 5.65k | { |
1103 | 5.65k | assert(peer.m_addr_known); Branch (1103:5): [True: 5.65k, False: 0]
|
1104 | 5.65k | peer.m_addr_known->insert(addr.GetKey()); |
1105 | 5.65k | } |
1106 | | |
1107 | | void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr) |
1108 | 549 | { |
1109 | | // Known checking here is only to save space from duplicates. |
1110 | | // Before sending, we'll filter it again for known addresses that were |
1111 | | // added after addresses were pushed. |
1112 | 549 | assert(peer.m_addr_known); Branch (1112:5): [True: 549, False: 0]
|
1113 | 549 | if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) { Branch (1113:9): [True: 549, False: 0]
Branch (1113:9): [True: 507, False: 42]
Branch (1113:27): [True: 507, False: 42]
Branch (1113:74): [True: 507, False: 0]
|
1114 | 507 | if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) { Branch (1114:13): [True: 0, False: 507]
|
1115 | 0 | peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr; |
1116 | 507 | } else { |
1117 | 507 | peer.m_addrs_to_send.push_back(addr); |
1118 | 507 | } |
1119 | 507 | } |
1120 | 549 | } |
1121 | | |
1122 | | static void AddKnownTx(Peer& peer, const uint256& hash) |
1123 | 1.15M | { |
1124 | 1.15M | auto tx_relay = peer.GetTxRelay(); |
1125 | 1.15M | if (!tx_relay) return; Branch (1125:9): [True: 0, False: 1.15M]
|
1126 | | |
1127 | 1.15M | LOCK(tx_relay->m_tx_inventory_mutex); |
1128 | 1.15M | tx_relay->m_tx_inventory_known_filter.insert(hash); |
1129 | 1.15M | } |
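// [Editor's sketch, not part of the source] m_tx_inventory_known_filter above
// is a rolling probabilistic set: memory stays bounded and the oldest entries
// are forgotten first. The rolling idea can be modeled with two generations of
// an exact set, as below; the real CRollingBloomFilter rolls bloom filter
// generations instead of std::unordered_set instances.
#include <cstddef>
#include <cstdint>
#include <unordered_set>
#include <utility>

class RollingSet
{
    std::unordered_set<uint64_t> m_cur, m_old;
    const std::size_t m_gen_size;

public:
    explicit RollingSet(std::size_t gen_size) : m_gen_size{gen_size} {}

    void insert(uint64_t key)
    {
        if (m_cur.size() >= m_gen_size) { // roll: drop the oldest generation
            m_old = std::move(m_cur);
            m_cur.clear();
        }
        m_cur.insert(key);
    }

    bool contains(uint64_t key) const { return m_cur.count(key) || m_old.count(key); }
};

int main()
{
    RollingSet seen{2};
    seen.insert(1);
    seen.insert(2); // first generation full
    seen.insert(3); // rolls: 1 and 2 move to the old generation
    return seen.contains(1) && seen.contains(3) ? 0 : 1;
}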
1130 | | |
1131 | | /** Whether this peer can serve us blocks. */ |
1132 | | static bool CanServeBlocks(const Peer& peer) |
1133 | 62.8M | { |
1134 | 62.8M | return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED); |
1135 | 62.8M | } |
1136 | | |
1137 | | /** Whether this peer can only serve limited recent blocks (e.g. because |
1138 | | * it prunes old blocks) */ |
1139 | | static bool IsLimitedPeer(const Peer& peer) |
1140 | 41.0M | { |
1141 | 41.0M | return (!(peer.m_their_services & NODE_NETWORK) && Branch (1141:13): [True: 0, False: 41.0M]
|
1142 | 41.0M | (peer.m_their_services & NODE_NETWORK_LIMITED)); Branch (1142:14): [True: 0, False: 0]
|
1143 | 41.0M | } |
1144 | | |
1145 | | /** Whether this peer can serve us witness data */ |
1146 | | static bool CanServeWitnesses(const Peer& peer) |
1147 | 1.13M | { |
1148 | 1.13M | return peer.m_their_services & NODE_WITNESS; |
1149 | 1.13M | } |
1150 | | |
1151 | | std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, |
1152 | | std::chrono::seconds average_interval) |
1153 | 1.52M | { |
1154 | 1.52M | if (m_next_inv_to_inbounds.load() < now) { Branch (1154:9): [True: 388k, False: 1.13M]
|
1155 | | // If this function were called from multiple threads simultaneously |
 1156 | | // it would be possible for both to update the next send variable and return different results to their callers. 
1157 | | // This is not possible in practice as only the net processing thread invokes this function. |
1158 | 388k | m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval); |
1159 | 388k | } |
1160 | 1.52M | return m_next_inv_to_inbounds; |
1161 | 1.52M | } |
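// [Editor's sketch, not part of the source] The exponentially distributed delay
// described above makes inbound INV announcements a Poisson process, so the
// timing of any single announcement reveals (almost) nothing about when a
// transaction actually arrived. This uses <random> directly instead of
// FastRandomContext::rand_exp_duration.
#include <chrono>
#include <random>

std::chrono::microseconds NextExpDelay(std::chrono::seconds average_interval, std::mt19937_64& rng)
{
    // Exponential with rate 1/mean, i.e. the draws average out to average_interval.
    std::exponential_distribution<double> dist{1.0 / std::chrono::duration<double>(average_interval).count()};
    return std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::duration<double>{dist(rng)});
}

int main()
{
    std::mt19937_64 rng{42};
    return NextExpDelay(std::chrono::seconds{5}, rng).count() >= 0 ? 0 : 1;
}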
1162 | | |
1163 | | bool PeerManagerImpl::IsBlockRequested(const uint256& hash) |
1164 | 2.84M | { |
1165 | 2.84M | return mapBlocksInFlight.count(hash); |
1166 | 2.84M | } |
1167 | | |
1168 | | bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash) |
1169 | 0 | { |
1170 | 0 | for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) { Branch (1170:60): [True: 0, False: 0]
|
1171 | 0 | auto [nodeid, block_it] = range.first->second; |
1172 | 0 | PeerRef peer{GetPeerRef(nodeid)}; |
1173 | 0 | if (peer && !peer->m_is_inbound) return true; Branch (1173:13): [True: 0, False: 0]
Branch (1173:21): [True: 0, False: 0]
|
1174 | 0 | } |
1175 | | |
1176 | 0 | return false; |
1177 | 0 | } |
1178 | | |
1179 | | void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) |
1180 | 4.48M | { |
1181 | 4.48M | auto range = mapBlocksInFlight.equal_range(hash); |
1182 | 4.48M | if (range.first == range.second) { Branch (1182:9): [True: 4.47M, False: 9.06k]
|
1183 | | // Block was not requested from any peer |
1184 | 4.47M | return; |
1185 | 4.47M | } |
1186 | | |
1187 | | // We should not have requested too many of this block |
1188 | 9.06k | Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); |
1189 | | |
1190 | 18.1k | while (range.first != range.second) { Branch (1190:12): [True: 9.06k, False: 9.06k]
|
1191 | 9.06k | const auto& [node_id, list_it]{range.first->second}; |
1192 | | |
1193 | 9.06k | if (from_peer && *from_peer != node_id) { Branch (1193:13): [True: 8.51k, False: 549]
Branch (1193:26): [True: 560, False: 7.95k]
|
1194 | 560 | range.first++; |
1195 | 560 | continue; |
1196 | 560 | } |
1197 | | |
1198 | 8.50k | CNodeState& state = *Assert(State(node_id)); |
1199 | | |
1200 | 8.50k | if (state.vBlocksInFlight.begin() == list_it) { Branch (1200:13): [True: 5.20k, False: 3.29k]
|
1201 | | // First block on the queue was received, update the start download time for the next one |
1202 | 5.20k | state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>()); |
1203 | 5.20k | } |
1204 | 8.50k | state.vBlocksInFlight.erase(list_it); |
1205 | | |
1206 | 8.50k | if (state.vBlocksInFlight.empty()) { Branch (1206:13): [True: 4.93k, False: 3.57k]
|
1207 | | // Last validated block on the queue for this peer was received. |
1208 | 4.93k | m_peers_downloading_from--; |
1209 | 4.93k | } |
1210 | 8.50k | state.m_stalling_since = 0us; |
1211 | | |
1212 | 8.50k | range.first = mapBlocksInFlight.erase(range.first); |
1213 | 8.50k | } |
1214 | 9.06k | } |
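// [Editor's sketch, not part of the source] The erase pattern used in
// RemoveBlockRequest() above, in miniature: when deleting selected entries
// from a multimap range, advance via the iterator returned by erase() so the
// loop remains valid.
#include <cassert>
#include <map>

int main()
{
    std::multimap<int, int> m{{1, 10}, {1, 20}, {1, 30}, {2, 40}};
    auto range = m.equal_range(1);
    while (range.first != range.second) {
        if (range.first->second == 20) {
            range.first++;                      // keep this entry
        } else {
            range.first = m.erase(range.first); // erase returns the next iterator
        }
    }
    assert(m.size() == 2); // {1,20} and {2,40} remain
    return 0;
}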
1215 | | |
1216 | | bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit) |
1217 | 11.5k | { |
1218 | 11.5k | const uint256& hash{block.GetBlockHash()}; |
1219 | | |
1220 | 11.5k | CNodeState *state = State(nodeid); |
1221 | 11.5k | assert(state != nullptr); Branch (1221:5): [True: 11.5k, False: 0]
|
1222 | | |
1223 | 11.5k | Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); |
1224 | | |
1225 | | // Short-circuit most stuff in case it is from the same node |
1226 | 11.5k | for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) { Branch (1226:60): [True: 0, False: 11.5k]
|
1227 | 0 | if (range.first->second.first == nodeid) { Branch (1227:13): [True: 0, False: 0]
|
1228 | 0 | if (pit) { Branch (1228:17): [True: 0, False: 0]
|
1229 | 0 | *pit = &range.first->second.second; |
1230 | 0 | } |
1231 | 0 | return false; |
1232 | 0 | } |
1233 | 0 | } |
1234 | | |
1235 | | // Make sure it's not being fetched already from same peer. |
1236 | 11.5k | RemoveBlockRequest(hash, nodeid); |
1237 | | |
1238 | 11.5k | std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), |
1239 | 11.5k | {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); Branch (1239:64): [True: 0, False: 11.5k]
|
1240 | 11.5k | if (state->vBlocksInFlight.size() == 1) { Branch (1240:9): [True: 6.11k, False: 5.41k]
|
1241 | | // We're starting a block download (batch) from this peer. |
1242 | 6.11k | state->m_downloading_since = GetTime<std::chrono::microseconds>(); |
1243 | 6.11k | m_peers_downloading_from++; |
1244 | 6.11k | } |
1245 | 11.5k | auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))); |
1246 | 11.5k | if (pit) { Branch (1246:9): [True: 0, False: 11.5k]
|
1247 | 0 | *pit = &itInFlight->second.second; |
1248 | 0 | } |
1249 | 11.5k | return true; |
1250 | 11.5k | } |
1251 | | |
1252 | | void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) |
1253 | 2.22M | { |
1254 | 2.22M | AssertLockHeld(cs_main); |
1255 | | |
1256 | | // When in -blocksonly mode, never request high-bandwidth mode from peers. Our |
1257 | | // mempool will not contain the transactions necessary to reconstruct the |
1258 | | // compact block. |
1259 | 2.22M | if (m_opts.ignore_incoming_txs) return; Branch (1259:9): [True: 0, False: 2.22M]
|
1260 | | |
1261 | 2.22M | CNodeState* nodestate = State(nodeid); |
1262 | 2.22M | PeerRef peer{GetPeerRef(nodeid)}; |
1263 | 2.22M | if (!nodestate || !nodestate->m_provides_cmpctblocks) { Branch (1263:9): [True: 70, False: 2.22M]
Branch (1263:23): [True: 0, False: 2.22M]
|
1264 | | // Don't request compact blocks if the peer has not signalled support |
1265 | 70 | return; |
1266 | 70 | } |
1267 | | |
1268 | 2.22M | int num_outbound_hb_peers = 0; |
1269 | 2.22M | for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { Branch (1269:81): [True: 2.21M, False: 13.2k]
|
1270 | 2.21M | if (*it == nodeid) { Branch (1270:13): [True: 2.20M, False: 3.93k]
|
1271 | 2.20M | lNodesAnnouncingHeaderAndIDs.erase(it); |
1272 | 2.20M | lNodesAnnouncingHeaderAndIDs.push_back(nodeid); |
1273 | 2.20M | return; |
1274 | 2.20M | } |
1275 | 3.93k | PeerRef peer_ref{GetPeerRef(*it)}; |
1276 | 3.93k | if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers; Branch (1276:13): [True: 3.63k, False: 299]
Branch (1276:25): [True: 3.12k, False: 517]
|
1277 | 3.93k | } |
1278 | 13.2k | if (peer && peer->m_is_inbound) { Branch (1278:9): [True: 13.2k, False: 0]
Branch (1278:17): [True: 1.23k, False: 12.0k]
|
1279 | | // If we're adding an inbound HB peer, make sure we're not removing |
1280 | | // our last outbound HB peer in the process. |
1281 | 1.23k | if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) { Branch (1281:13): [True: 99, False: 1.13k]
Branch (1281:57): [True: 44, False: 55]
|
1282 | 44 | PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())}; |
1283 | 44 | if (remove_peer && !remove_peer->m_is_inbound) { Branch (1283:17): [True: 31, False: 13]
Branch (1283:32): [True: 16, False: 15]
|
1284 | | // Put the HB outbound peer in the second slot, so that it |
1285 | | // doesn't get removed. |
1286 | 16 | std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin())); |
1287 | 16 | } |
1288 | 44 | } |
1289 | 1.23k | } |
1290 | 13.2k | m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
1291 | 13.2k | AssertLockHeld(::cs_main); |
1292 | 13.2k | if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { Branch (1292:13): [True: 131, False: 13.1k]
|
1293 | | // As per BIP152, we only get 3 of our peers to announce |
1294 | | // blocks using compact encodings. |
1295 | 131 | m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){ |
1296 | 102 | MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION); |
1297 | | // save BIP152 bandwidth state: we select peer to be low-bandwidth |
1298 | 102 | pnodeStop->m_bip152_highbandwidth_to = false; |
1299 | 102 | return true; |
1300 | 102 | }); |
1301 | 131 | lNodesAnnouncingHeaderAndIDs.pop_front(); |
1302 | 131 | } |
1303 | 13.2k | MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION); |
1304 | | // save BIP152 bandwidth state: we select peer to be high-bandwidth |
1305 | 13.2k | pfrom->m_bip152_highbandwidth_to = true; |
1306 | 13.2k | lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); |
1307 | 13.2k | return true; |
1308 | 13.2k | }); |
1309 | 13.2k | } |
1310 | | |
1311 | | bool PeerManagerImpl::TipMayBeStale() |
1312 | 0 | { |
1313 | 0 | AssertLockHeld(cs_main); |
1314 | 0 | const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); |
1315 | 0 | if (m_last_tip_update.load() == 0s) { Branch (1315:9): [True: 0, False: 0]
|
1316 | 0 | m_last_tip_update = GetTime<std::chrono::seconds>(); |
1317 | 0 | } |
1318 | 0 | return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty(); Branch (1318:12): [True: 0, False: 0]
Branch (1318:136): [True: 0, False: 0]
|
1319 | 0 | } |
1320 | | |
1321 | | int64_t PeerManagerImpl::ApproximateBestBlockDepth() const |
1322 | 493 | { |
1323 | 493 | return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing; |
1324 | 493 | } |
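// [Editor's sketch, not part of the source] A worked instance of the formula
// above: with mainnet's 600-second target spacing, a best-block time two hours
// in the past approximates 7200 / 600 = 12 blocks behind the network tip.
#include <cstdint>

constexpr int64_t ApproxDepth(int64_t now_s, int64_t best_block_time_s, int64_t spacing_s)
{
    return (now_s - best_block_time_s) / spacing_s;
}
static_assert(ApproxDepth(/*now=*/7200, /*best block time=*/0, /*spacing=*/600) == 12);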
1325 | | |
1326 | | bool PeerManagerImpl::CanDirectFetch() |
1327 | 24.5k | { |
1328 | 24.5k | return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20; |
1329 | 24.5k | } |
1330 | | |
1331 | | static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main) |
1332 | 35.1M | { |
1333 | 35.1M | if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) Branch (1333:9): [True: 43.9k, False: 35.1M]
Branch (1333:40): [True: 9.11k, False: 34.8k]
|
1334 | 9.11k | return true; |
1335 | 35.1M | if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) Branch (1335:9): [True: 12.4k, False: 35.1M]
Branch (1335:40): [True: 7.87k, False: 4.61k]
|
1336 | 7.87k | return true; |
1337 | 35.1M | return false; |
1338 | 35.1M | } |
1339 | | |
1340 | 101M | void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) { |
1341 | 101M | CNodeState *state = State(nodeid); |
1342 | 101M | assert(state != nullptr); Branch (1342:5): [True: 101M, False: 0]
|
1343 | | |
1344 | 101M | if (!state->hashLastUnknownBlock.IsNull()) { Branch (1344:9): [True: 180k, False: 101M]
|
1345 | 180k | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock); |
1346 | 180k | if (pindex && pindex->nChainWork > 0) { Branch (1346:13): [True: 123, False: 180k]
Branch (1346:13): [True: 123, False: 180k]
Branch (1346:23): [True: 123, False: 0]
|
1347 | 123 | if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { Branch (1347:17): [True: 0, False: 123]
Branch (1347:59): [True: 37, False: 86]
|
1348 | 37 | state->pindexBestKnownBlock = pindex; |
1349 | 37 | } |
1350 | 123 | state->hashLastUnknownBlock.SetNull(); |
1351 | 123 | } |
1352 | 180k | } |
1353 | 101M | } |
1354 | | |
1355 | 111k | void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { |
1356 | 111k | CNodeState *state = State(nodeid); |
1357 | 111k | assert(state != nullptr); Branch (1357:5): [True: 111k, False: 0]
|
1358 | | |
1359 | 111k | ProcessBlockAvailability(nodeid); |
1360 | | |
1361 | 111k | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash); |
1362 | 111k | if (pindex && pindex->nChainWork > 0) { Branch (1362:9): [True: 110k, False: 1.21k]
Branch (1362:9): [True: 110k, False: 1.21k]
Branch (1362:19): [True: 110k, False: 0]
|
1363 | | // An actually better block was announced. |
1364 | 110k | if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { Branch (1364:13): [True: 88.7k, False: 21.5k]
Branch (1364:55): [True: 11.4k, False: 10.1k]
|
1365 | 100k | state->pindexBestKnownBlock = pindex; |
1366 | 100k | } |
1367 | 110k | } else { |
1368 | | // An unknown block was announced; just assume that the latest one is the best one. |
1369 | 1.21k | state->hashLastUnknownBlock = hash; |
1370 | 1.21k | } |
1371 | 111k | } |
1372 | | |
1373 | | // Logic for calculating which blocks to download from a given peer, given our current tip. |
1374 | | void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) |
1375 | 41.8M | { |
1376 | 41.8M | if (count == 0) Branch (1376:9): [True: 0, False: 41.8M]
|
1377 | 0 | return; |
1378 | | |
1379 | 41.8M | vBlocks.reserve(vBlocks.size() + count); |
1380 | 41.8M | CNodeState *state = State(peer.m_id); |
1381 | 41.8M | assert(state != nullptr); Branch (1381:5): [True: 41.8M, False: 0]
|
1382 | | |
1383 | | // Make sure pindexBestKnownBlock is up to date, we'll need it. |
1384 | 41.8M | ProcessBlockAvailability(peer.m_id); |
1385 | | |
1386 | 41.8M | if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) { Branch (1386:9): [True: 20.3M, False: 21.4M]
Branch (1386:51): [True: 2.71M, False: 18.7M]
Branch (1386:139): [True: 0, False: 18.7M]
|
1387 | | // This peer has nothing interesting. |
1388 | 23.0M | return; |
1389 | 23.0M | } |
1390 | | |
1391 | | // When we sync with AssumeUtxo and discover the snapshot is not in the peer's best chain, abort: |
1392 | | // We can't reorg to this chain due to missing undo data until the background sync has finished, |
1393 | | // so downloading blocks from it would be futile. |
1394 | 18.7M | const CBlockIndex* snap_base{m_chainman.GetSnapshotBaseBlock()}; |
1395 | 18.7M | if (snap_base && state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base) { Branch (1395:9): [True: 0, False: 18.7M]
Branch (1395:22): [True: 0, False: 0]
|
1396 | 0 | LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id); |
1397 | 0 | return; |
1398 | 0 | } |
1399 | | |
1400 | | // Bootstrap quickly by guessing a parent of our best tip is the forking point. |
1401 | | // Guessing wrong in either direction is not a problem. |
1402 | | // Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download. |
1403 | 18.7M | if (state->pindexLastCommonBlock == nullptr || Branch (1403:9): [True: 88.7k, False: 18.6M]
|
1404 | 18.7M | (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) { Branch (1404:10): [True: 0, False: 18.6M]
Branch (1404:23): [True: 0, False: 0]
|
1405 | 88.7k | state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())]; |
1406 | 88.7k | } |
1407 | | |
1408 | | // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor |
1409 | | // of its current tip anymore. Go back enough to fix that. |
1410 | 18.7M | state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock); |
1411 | 18.7M | if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) Branch (1411:9): [True: 18.3M, False: 382k]
|
1412 | 18.3M | return; |
1413 | | |
1414 | 382k | const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; |
1415 | | // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last |
1416 | | // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to |
1417 | | // download that next block if the window were 1 larger. |
1418 | 382k | int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; |
1419 | | |
1420 | 382k | FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller); |
1421 | 382k | } |
1422 | | |
1423 | | void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block) |
1424 | 0 | { |
1425 | 0 | Assert(from_tip); |
1426 | 0 | Assert(target_block); |
 1427 | | 
1428 | 0 | if (vBlocks.size() >= count) { Branch (1428:9): [True: 0, False: 0]
|
1429 | 0 | return; |
1430 | 0 | } |
1431 | | |
1432 | 0 | vBlocks.reserve(count); |
1433 | 0 | CNodeState *state = Assert(State(peer.m_id)); |
 1434 | | 
1435 | 0 | if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) { Branch (1435:9): [True: 0, False: 0]
Branch (1435:51): [True: 0, False: 0]
|
1436 | | // This peer can't provide us the complete series of blocks leading up to the |
1437 | | // assumeutxo snapshot base. |
1438 | | // |
1439 | | // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we |
1440 | | // will eventually crash when we try to reorg to it. Let other logic |
1441 | | // deal with whether we disconnect this peer. |
1442 | | // |
1443 | | // TODO at some point in the future, we might choose to request what blocks |
1444 | | // this peer does have from the historical chain, despite it not having a |
1445 | | // complete history beneath the snapshot base. |
1446 | 0 | return; |
1447 | 0 | } |
1448 | | |
1449 | 0 | FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight)); |
1450 | 0 | } |
1451 | | |
1452 | | void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller) |
1453 | 382k | { |
1454 | 382k | std::vector<const CBlockIndex*> vToFetch; |
1455 | 382k | int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); |
1456 | 382k | bool is_limited_peer = IsLimitedPeer(peer); |
1457 | 382k | NodeId waitingfor = -1; |
1458 | 748k | while (pindexWalk->nHeight < nMaxHeight) { Branch (1458:12): [True: 382k, False: 366k]
|
1459 | | // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards |
1460 | | // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive |
1461 | | // as iterating over ~100 CBlockIndex* entries anyway. |
1462 | 382k | int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128)); |
1463 | 382k | vToFetch.resize(nToFetch); |
1464 | 382k | pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch); |
1465 | 382k | vToFetch[nToFetch - 1] = pindexWalk; |
1466 | 1.01M | for (unsigned int i = nToFetch - 1; i > 0; i--) { Branch (1466:45): [True: 636k, False: 382k]
|
1467 | 636k | vToFetch[i - 1] = vToFetch[i]->pprev; |
1468 | 636k | } |
1469 | | |
1470 | | // Iterate over those blocks in vToFetch (in forward direction), adding the ones that |
1471 | | // are not yet downloaded and not in flight to vBlocks. In the meantime, update |
1472 | | // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's |
1473 | | // already part of our chain (and therefore don't need it even if pruned). |
1474 | 1.01M | for (const CBlockIndex* pindex : vToFetch) { Branch (1474:40): [True: 1.01M, False: 366k]
|
1475 | 1.01M | if (!pindex->IsValid(BLOCK_VALID_TREE)) { Branch (1475:17): [True: 16.4k, False: 995k]
|
1476 | | // We consider the chain that this peer is on invalid. |
1477 | 16.4k | return; |
1478 | 16.4k | } |
1479 | | |
1480 | 995k | if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) { Branch (1480:17): [True: 0, False: 995k]
Branch (1480:45): [True: 0, False: 0]
|
1481 | | // We wouldn't download this block or its descendants from this peer. |
1482 | 0 | return; |
1483 | 0 | } |
1484 | | |
1485 | 995k | if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) { Branch (1485:17): [True: 406k, False: 589k]
Branch (1485:55): [True: 589k, False: 0]
Branch (1485:70): [True: 0, False: 589k]
|
1486 | 406k | if (activeChain && pindex->HaveNumChainTxs()) { Branch (1486:21): [True: 406k, False: 0]
Branch (1486:36): [True: 6.86k, False: 399k]
|
1487 | 6.86k | state->pindexLastCommonBlock = pindex; |
1488 | 6.86k | } |
1489 | 406k | continue; |
1490 | 406k | } |
1491 | | |
1492 | | // Is block in-flight? |
1493 | 589k | if (IsBlockRequested(pindex->GetBlockHash())) { Branch (1493:17): [True: 582k, False: 6.87k]
|
1494 | 582k | if (waitingfor == -1) { Branch (1494:21): [True: 359k, False: 223k]
|
1495 | | // This is the first already-in-flight block. |
1496 | 359k | waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first; |
1497 | 359k | } |
1498 | 582k | continue; |
1499 | 582k | } |
1500 | | |
1501 | | // The block is not already downloaded, and not yet in flight. |
1502 | 6.87k | if (pindex->nHeight > nWindowEnd) { Branch (1502:17): [True: 0, False: 6.87k]
|
1503 | | // We reached the end of the window. |
1504 | 0 | if (vBlocks.size() == 0 && waitingfor != peer.m_id) { Branch (1504:21): [True: 0, False: 0]
Branch (1504:44): [True: 0, False: 0]
|
1505 | | // We aren't able to fetch anything, but we would be if the download window was one larger. |
1506 | 0 | if (nodeStaller) *nodeStaller = waitingfor; Branch (1506:25): [True: 0, False: 0]
|
1507 | 0 | } |
1508 | 0 | return; |
1509 | 0 | } |
1510 | | |
1511 | | // Don't request blocks that go further than what limited peers can provide |
1512 | 6.87k | if (is_limited_peer && (state->pindexBestKnownBlock->nHeight - pindex->nHeight >= static_cast<int>(NODE_NETWORK_LIMITED_MIN_BLOCKS) - 2 /* two blocks buffer for possible races */)) { Branch (1512:17): [True: 0, False: 6.87k]
Branch (1512:36): [True: 0, False: 0]
|
1513 | 0 | continue; |
1514 | 0 | } |
1515 | | |
1516 | 6.87k | vBlocks.push_back(pindex); |
1517 | 6.87k | if (vBlocks.size() == count) { Branch (1517:17): [True: 18, False: 6.85k]
|
1518 | 18 | return; |
1519 | 18 | } |
1520 | 6.87k | } |
1521 | 382k | } |
1522 | 382k | } |
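// [Editor's sketch, not part of the source] The batching trick at the top of
// FindNextBlocks(): to enumerate successors of `walk` towards `best`, jump
// forward once with GetAncestor() and then fill the batch backwards through
// pprev pointers. MockIndex is an illustrative stand-in for CBlockIndex, and
// this GetAncestor() is a linear walk where Core's uses a skiplist.
#include <algorithm>
#include <vector>

struct MockIndex {
    int nHeight{0};
    MockIndex* pprev{nullptr};
    MockIndex* GetAncestor(int height)
    {
        MockIndex* p = this;
        while (p && p->nHeight > height) p = p->pprev;
        return p;
    }
};

std::vector<MockIndex*> NextBatch(MockIndex* walk, MockIndex* best, int batch = 128)
{
    const int n = std::min(best->nHeight - walk->nHeight, batch);
    std::vector<MockIndex*> out(n);
    MockIndex* cur = best->GetAncestor(walk->nHeight + n);  // one jump forward
    for (int i = n; i-- > 0; cur = cur->pprev) out[i] = cur; // fill backwards, in order
    return out;
}

int main()
{
    std::vector<MockIndex> chain(32);
    for (int i = 1; i < 32; ++i) { chain[i].nHeight = i; chain[i].pprev = &chain[i - 1]; }
    auto batch = NextBatch(&chain[4], &chain[31], 8); // successors at heights 5..12
    return (batch.size() == 8 && batch.front()->nHeight == 5) ? 0 : 1;
}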
1523 | | |
1524 | | } // namespace |
1525 | | |
1526 | | void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) |
1527 | 88.7k | { |
1528 | 88.7k | uint64_t my_services{peer.m_our_services}; |
1529 | 88.7k | const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())}; |
1530 | 88.7k | uint64_t nonce = pnode.GetLocalNonce(); |
1531 | 88.7k | const int nNodeStartingHeight{m_best_height}; |
1532 | 88.7k | NodeId nodeid = pnode.GetId(); |
1533 | 88.7k | CAddress addr = pnode.addr; |
1534 | | |
1535 | 88.7k | CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService(); Branch (1535:25): [True: 0, False: 88.7k]
Branch (1535:46): [True: 0, False: 0]
Branch (1535:64): [True: 0, False: 0]
|
1536 | 88.7k | uint64_t your_services{addr.nServices}; |
1537 | | |
1538 | 88.7k | const bool tx_relay{!RejectIncomingTxs(pnode)}; |
1539 | 88.7k | MakeAndPushMessage(pnode, NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime, |
1540 | 88.7k | your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime) |
1541 | 88.7k | my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime) |
1542 | 88.7k | nonce, strSubVersion, nNodeStartingHeight, tx_relay); |
1543 | | |
1544 | 88.7k | if (fLogIPs) { Branch (1544:9): [True: 0, False: 88.7k]
|
1545 | 0 | LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid); |
1546 | 88.7k | } else { |
1547 | 88.7k | LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid); |
1548 | 88.7k | } |
1549 | 88.7k | } |
1550 | | |
1551 | | void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) |
1552 | 0 | { |
1553 | 0 | LOCK(cs_main); |
1554 | 0 | CNodeState *state = State(node); |
1555 | 0 | if (state) state->m_last_block_announcement = time_in_seconds; Branch (1555:9): [True: 0, False: 0]
|
1556 | 0 | } |
1557 | | |
1558 | | void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services) |
1559 | 88.7k | { |
1560 | 88.7k | NodeId nodeid = node.GetId(); |
1561 | 88.7k | { |
1562 | 88.7k | LOCK(cs_main); // For m_node_states |
1563 | 88.7k | m_node_states.try_emplace(m_node_states.end(), nodeid); |
1564 | 88.7k | } |
1565 | 88.7k | WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid)); |
1566 | | |
1567 | 88.7k | if (NetPermissions::HasFlag(node.m_permission_flags, NetPermissionFlags::BloomFilter)) { Branch (1567:9): [True: 0, False: 88.7k]
|
1568 | 0 | our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM); |
1569 | 0 | } |
1570 | | |
1571 | 88.7k | PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn()); |
1572 | 88.7k | { |
1573 | 88.7k | LOCK(m_peer_mutex); |
1574 | 88.7k | m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer); |
1575 | 88.7k | } |
1576 | 88.7k | } |
1577 | | |
1578 | | void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler) |
1579 | 0 | { |
1580 | 0 | std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); |
 1581 | | 
1582 | 0 | for (const auto& txid : unbroadcast_txids) { Branch (1582:27): [True: 0, False: 0]
|
1583 | 0 | CTransactionRef tx = m_mempool.get(txid); |
 1584 | | 
1585 | 0 | if (tx != nullptr) { Branch (1585:13): [True: 0, False: 0]
|
1586 | 0 | RelayTransaction(txid, tx->GetWitnessHash()); |
1587 | 0 | } else { |
1588 | 0 | m_mempool.RemoveUnbroadcastTx(txid, true); |
1589 | 0 | } |
1590 | 0 | } |
1591 | | |
1592 | | // Schedule next run for 10-15 minutes in the future. |
1593 | | // We add randomness on every cycle to avoid the possibility of P2P fingerprinting. |
1594 | 0 | const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); |
1595 | 0 | scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); |
1596 | 0 | } |
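// [Editor's sketch, not part of the source] The 10-15 minute jittered
// rescheduling described above, in miniature. Scheduler is an illustrative
// stand-in for CScheduler that merely records the next task and its delay.
#include <chrono>
#include <cstdint>
#include <functional>
#include <random>

struct Scheduler {
    std::function<void()> next;
    std::chrono::milliseconds when{};
    void scheduleFromNow(std::function<void()> f, std::chrono::milliseconds delta)
    {
        next = std::move(f); // a real scheduler would queue and run this later
        when = delta;
    }
};

void Rebroadcast(Scheduler& scheduler, std::mt19937_64& rng)
{
    // ... relay any unbroadcast transactions here ...
    const std::chrono::milliseconds jitter{std::uniform_int_distribution<int64_t>(0, 5 * 60 * 1000 - 1)(rng)};
    scheduler.scheduleFromNow([&] { Rebroadcast(scheduler, rng); },
                              std::chrono::minutes{10} + jitter);
}

int main()
{
    Scheduler s;
    std::mt19937_64 rng{42};
    Rebroadcast(s, rng);
    return (s.when >= std::chrono::minutes{10} && s.when < std::chrono::minutes{15}) ? 0 : 1;
}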
1597 | | |
1598 | | void PeerManagerImpl::FinalizeNode(const CNode& node) |
1599 | 88.7k | { |
1600 | 88.7k | NodeId nodeid = node.GetId(); |
1601 | 88.7k | { |
1602 | 88.7k | LOCK(cs_main); |
1603 | 88.7k | { |
 1604 | | // We remove the PeerRef from m_peer_map here, but we don't always 
1605 | | // destruct the Peer. Sometimes another thread is still holding a |
1606 | | // PeerRef, so the refcount is >= 1. Be careful not to do any |
1607 | | // processing here that assumes Peer won't be changed before it's |
1608 | | // destructed. |
1609 | 88.7k | PeerRef peer = RemovePeer(nodeid); |
1610 | 88.7k | assert(peer != nullptr); Branch (1610:9): [True: 88.7k, False: 0]
|
1611 | 88.7k | m_wtxid_relay_peers -= peer->m_wtxid_relay; |
1612 | 88.7k | assert(m_wtxid_relay_peers >= 0); Branch (1612:9): [True: 88.7k, False: 0]
|
1613 | 88.7k | } |
1614 | 88.7k | CNodeState *state = State(nodeid); |
1615 | 88.7k | assert(state != nullptr); Branch (1615:5): [True: 88.7k, False: 0]
|
1616 | | |
1617 | 88.7k | if (state->fSyncStarted) Branch (1617:9): [True: 88.7k, False: 0]
|
1618 | 88.7k | nSyncStarted--; |
1619 | | |
1620 | 88.7k | for (const QueuedBlock& entry : state->vBlocksInFlight) { Branch (1620:35): [True: 3.02k, False: 88.7k]
|
1621 | 3.02k | auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash()); |
1622 | 6.05k | while (range.first != range.second) { Branch (1622:16): [True: 3.02k, False: 3.02k]
|
1623 | 3.02k | auto [node_id, list_it] = range.first->second; |
1624 | 3.02k | if (node_id != nodeid) { Branch (1624:17): [True: 0, False: 3.02k]
|
1625 | 0 | range.first++; |
1626 | 3.02k | } else { |
1627 | 3.02k | range.first = mapBlocksInFlight.erase(range.first); |
1628 | 3.02k | } |
1629 | 3.02k | } |
1630 | 3.02k | } |
1631 | 88.7k | { |
1632 | 88.7k | LOCK(m_tx_download_mutex); |
1633 | 88.7k | m_txdownloadman.DisconnectedPeer(nodeid); |
1634 | 88.7k | } |
1635 | 88.7k | if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid); Branch (1635:9): [True: 88.7k, False: 0]
|
1636 | 88.7k | m_num_preferred_download_peers -= state->fPreferredDownload; |
1637 | 88.7k | m_peers_downloading_from -= (!state->vBlocksInFlight.empty()); |
1638 | 88.7k | assert(m_peers_downloading_from >= 0); Branch (1638:5): [True: 88.7k, False: 0]
|
1639 | 88.7k | m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; |
1640 | 88.7k | assert(m_outbound_peers_with_protect_from_disconnect >= 0); Branch (1640:5): [True: 88.7k, False: 0]
|
1641 | | |
1642 | 88.7k | m_node_states.erase(nodeid); |
1643 | | |
1644 | 88.7k | if (m_node_states.empty()) { Branch (1644:9): [True: 11.0k, False: 77.6k]
|
1645 | | // Do a consistency check after the last peer is removed. |
1646 | 11.0k | assert(mapBlocksInFlight.empty()); Branch (1646:9): [True: 11.0k, False: 0]
|
1647 | 11.0k | assert(m_num_preferred_download_peers == 0); Branch (1647:9): [True: 11.0k, False: 0]
|
1648 | 11.0k | assert(m_peers_downloading_from == 0); Branch (1648:9): [True: 11.0k, False: 0]
|
1649 | 11.0k | assert(m_outbound_peers_with_protect_from_disconnect == 0); Branch (1649:9): [True: 11.0k, False: 0]
|
1650 | 11.0k | assert(m_wtxid_relay_peers == 0); Branch (1650:9): [True: 11.0k, False: 0]
|
1651 | 11.0k | WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty()); |
1652 | 11.0k | } |
1653 | 88.7k | } // cs_main |
1654 | 88.7k | if (node.fSuccessfullyConnected && Branch (1654:9): [True: 88.7k, False: 0]
|
1655 | 88.7k | !node.IsBlockOnlyConn() && !node.IsInboundConn()) { Branch (1655:9): [True: 88.7k, False: 0]
Branch (1655:36): [True: 44.3k, False: 44.3k]
|
1656 | | // Only change visible addrman state for full outbound peers. We don't |
1657 | | // call Connected() for feeler connections since they don't have |
1658 | | // fSuccessfullyConnected set. |
1659 | 44.3k | m_addrman.Connected(node.addr); |
1660 | 44.3k | } |
1661 | 88.7k | { |
1662 | 88.7k | LOCK(m_headers_presync_mutex); |
1663 | 88.7k | m_headers_presync_stats.erase(nodeid); |
1664 | 88.7k | } |
1665 | 88.7k | LogDebug(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); |
1666 | 88.7k | } |
1667 | | |
1668 | | bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const |
1669 | 134k | { |
1670 | | // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services) |
1671 | 134k | return !(GetDesirableServiceFlags(services) & (~services)); |
1672 | 134k | } |
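// [Editor's sketch, not part of the source] The shortcut above rests on the
// bitmask identity (s & d) == d  <=>  (d & ~s) == 0, i.e. "no desirable bit is
// missing from s":
#include <cstdint>

constexpr bool HasAll(uint64_t services, uint64_t desirable)
{
    return !(desirable & ~services);
}
static_assert(HasAll(0b0101, 0b0001));
static_assert(!HasAll(0b0101, 0b0011)); // bit 1 is desired but not offered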
1673 | | |
1674 | | ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const |
1675 | 134k | { |
1676 | 134k | if (services & NODE_NETWORK_LIMITED) { Branch (1676:9): [True: 493, False: 133k]
|
1677 | | // Limited peers are desirable when we are close to the tip. |
1678 | 493 | if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) { Branch (1678:13): [True: 42, False: 451]
|
1679 | 42 | return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS); |
1680 | 42 | } |
1681 | 493 | } |
1682 | 134k | return ServiceFlags(NODE_NETWORK | NODE_WITNESS); |
1683 | 134k | } |
1684 | | |
1685 | | PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const |
1686 | 93.3M | { |
1687 | 93.3M | LOCK(m_peer_mutex); |
1688 | 93.3M | auto it = m_peer_map.find(id); |
1689 | 93.3M | return it != m_peer_map.end() ? it->second : nullptr; Branch (1689:12): [True: 93.3M, False: 382]
|
1690 | 93.3M | } |
1691 | | |
1692 | | PeerRef PeerManagerImpl::RemovePeer(NodeId id) |
1693 | 88.7k | { |
1694 | 88.7k | PeerRef ret; |
1695 | 88.7k | LOCK(m_peer_mutex); |
1696 | 88.7k | auto it = m_peer_map.find(id); |
1697 | 88.7k | if (it != m_peer_map.end()) { Branch (1697:9): [True: 88.7k, False: 0]
|
1698 | 88.7k | ret = std::move(it->second); |
1699 | 88.7k | m_peer_map.erase(it); |
1700 | 88.7k | } |
1701 | 88.7k | return ret; |
1702 | 88.7k | } |
1703 | | |
1704 | | bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const |
1705 | 0 | { |
1706 | 0 | { |
1707 | 0 | LOCK(cs_main); |
1708 | 0 | const CNodeState* state = State(nodeid); |
1709 | 0 | if (state == nullptr) Branch (1709:13): [True: 0, False: 0]
|
1710 | 0 | return false; |
1711 | 0 | stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; Branch (1711:29): [True: 0, False: 0]
|
1712 | 0 | stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1; Branch (1712:31): [True: 0, False: 0]
|
1713 | 0 | for (const QueuedBlock& queue : state->vBlocksInFlight) { Branch (1713:39): [True: 0, False: 0]
|
1714 | 0 | if (queue.pindex) Branch (1714:17): [True: 0, False: 0]
|
1715 | 0 | stats.vHeightInFlight.push_back(queue.pindex->nHeight); |
1716 | 0 | } |
1717 | 0 | } |
1718 | | |
1719 | 0 | PeerRef peer = GetPeerRef(nodeid); |
1720 | 0 | if (peer == nullptr) return false; Branch (1720:9): [True: 0, False: 0]
|
1721 | 0 | stats.their_services = peer->m_their_services; |
1722 | 0 | stats.m_starting_height = peer->m_starting_height; |
1723 | | // It is common for nodes with good ping times to suddenly become lagged, |
1724 | | // due to a new block arriving or other large transfer. |
1725 | | // Merely reporting pingtime might fool the caller into thinking the node was still responsive, |
1726 | | // since pingtime does not update until the ping is complete, which might take a while. |
1727 | | // So, if a ping is taking an unusually long time in flight, |
1728 | | // the caller can immediately detect that this is happening. |
1729 | 0 | auto ping_wait{0us}; |
1730 | 0 | if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) { Branch (1730:9): [True: 0, False: 0]
Branch (1730:9): [True: 0, False: 0]
Branch (1730:43): [True: 0, False: 0]
|
1731 | 0 | ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load(); |
1732 | 0 | } |
 1733 | | 
1734 | 0 | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { Branch (1734:45): [True: 0, False: 0]
|
1735 | 0 | stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs); |
1736 | 0 | stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load(); |
1737 | 0 | } else { |
1738 | 0 | stats.m_relay_txs = false; |
1739 | 0 | stats.m_fee_filter_received = 0; |
1740 | 0 | } |
 1741 | | 
1742 | 0 | stats.m_ping_wait = ping_wait; |
1743 | 0 | stats.m_addr_processed = peer->m_addr_processed.load(); |
1744 | 0 | stats.m_addr_rate_limited = peer->m_addr_rate_limited.load(); |
1745 | 0 | stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load(); |
1746 | 0 | { |
1747 | 0 | LOCK(peer->m_headers_sync_mutex); |
1748 | 0 | if (peer->m_headers_sync) { Branch (1748:13): [True: 0, False: 0]
|
1749 | 0 | stats.presync_height = peer->m_headers_sync->GetPresyncHeight(); |
1750 | 0 | } |
1751 | 0 | } |
1752 | 0 | stats.time_offset = peer->m_time_offset; |
 1753 | | 
1754 | 0 | return true; |
1755 | 0 | } |
1756 | | |
1757 | | std::vector<TxOrphanage::OrphanTxBase> PeerManagerImpl::GetOrphanTransactions() |
1758 | 0 | { |
1759 | 0 | LOCK(m_tx_download_mutex); |
1760 | 0 | return m_txdownloadman.GetOrphanTransactions(); |
1761 | 0 | } |
1762 | | |
1763 | | PeerManagerInfo PeerManagerImpl::GetInfo() const |
1764 | 0 | { |
1765 | 0 | return PeerManagerInfo{ |
1766 | 0 | .median_outbound_time_offset = m_outbound_time_offsets.Median(), |
1767 | 0 | .ignores_incoming_txs = m_opts.ignore_incoming_txs, |
1768 | 0 | }; |
1769 | 0 | } |
1770 | | |
1771 | | void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx) |
1772 | 346k | { |
1773 | 346k | if (m_opts.max_extra_txs <= 0) Branch (1773:9): [True: 0, False: 346k]
|
1774 | 0 | return; |
1775 | 346k | if (!vExtraTxnForCompact.size()) Branch (1775:9): [True: 7.53k, False: 338k]
|
1776 | 7.53k | vExtraTxnForCompact.resize(m_opts.max_extra_txs); |
1777 | 346k | vExtraTxnForCompact[vExtraTxnForCompactIt] = tx; |
1778 | 346k | vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs; |
1779 | 346k | } |
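
AddToCompactExtraTransactions is a lazily allocated ring buffer: nothing is allocated until the first extra transaction arrives, after which the newest entry overwrites the oldest. A generic sketch of that pattern (RollingWindow is an illustrative name, with strings standing in for CTransactionRef):

    #include <cstddef>
    #include <string>
    #include <vector>

    // Lazily allocated circular buffer holding the last `capacity` items.
    template <typename T>
    class RollingWindow {
        std::vector<T> m_buf;
        std::size_t m_next{0};
        std::size_t m_capacity;
    public:
        explicit RollingWindow(std::size_t capacity) : m_capacity{capacity} {}
        void Add(const T& item) {
            if (m_capacity == 0) return;                 // feature disabled
            if (m_buf.empty()) m_buf.resize(m_capacity); // allocate on first use
            m_buf[m_next] = item;                        // overwrite the oldest slot
            m_next = (m_next + 1) % m_capacity;          // advance the write cursor
        }
    };

    int main() {
        RollingWindow<std::string> w{3};
        for (auto* s : {"a", "b", "c", "d"}) w.Add(s); // "d" overwrites "a"
    }
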
1780 | | |
1781 | | void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message) |
1782 | 6.21k | { |
1783 | 6.21k | LOCK(peer.m_misbehavior_mutex); |
1784 | | |
1785 | 6.21k | const std::string message_prefixed = message.empty() ? "" : (": " + message); Branch (1785:42): [True: 5.86k, False: 351]
|
1786 | 6.21k | peer.m_should_discourage = true; |
1787 | 6.21k | LogDebug(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed); |
1788 | 6.21k | TRACEPOINT(net, misbehaving_connection, |
1789 | 6.21k | peer.m_id, |
1790 | 6.21k | message.c_str() |
1791 | 6.21k | ); |
1792 | 6.21k | } |
1793 | | |
1794 | | void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, |
1795 | | bool via_compact_block, const std::string& message) |
1796 | 6.27k | { |
1797 | 6.27k | PeerRef peer{GetPeerRef(nodeid)}; |
1798 | 6.27k | switch (state.GetResult()) { Branch (1798:13): [True: 0, False: 6.27k]
|
1799 | 0 | case BlockValidationResult::BLOCK_RESULT_UNSET: Branch (1799:5): [True: 0, False: 6.27k]
|
1800 | 0 | break; |
1801 | 0 | case BlockValidationResult::BLOCK_HEADER_LOW_WORK: Branch (1801:5): [True: 0, False: 6.27k]
|
1802 | | // We didn't try to process the block because the header chain may have |
1803 | | // too little work. |
1804 | 0 | break; |
1805 | | // The node is providing invalid data: |
1806 | 2.33k | case BlockValidationResult::BLOCK_CONSENSUS: Branch (1806:5): [True: 2.33k, False: 3.94k]
|
1807 | 2.37k | case BlockValidationResult::BLOCK_MUTATED: Branch (1807:5): [True: 45, False: 6.23k]
|
1808 | 2.37k | if (!via_compact_block) { Branch (1808:13): [True: 2.37k, False: 0]
|
1809 | 2.37k | if (peer) Misbehaving(*peer, message); Branch (1809:17): [True: 2.37k, False: 0]
|
1810 | 2.37k | return; |
1811 | 2.37k | } |
1812 | 0 | break; |
1813 | 126 | case BlockValidationResult::BLOCK_CACHED_INVALID: Branch (1813:5): [True: 126, False: 6.15k]
|
1814 | 126 | { |
1815 | | // Discourage outbound (but not inbound) peers if on an invalid chain. |
1816 | | // Exempt HB compact block peers. Manual connections are always protected from discouragement. |
1817 | 126 | if (peer && !via_compact_block && !peer->m_is_inbound) { Branch (1817:17): [True: 126, False: 0]
Branch (1817:25): [True: 126, False: 0]
Branch (1817:47): [True: 37, False: 89]
|
1818 | 37 | if (peer) Misbehaving(*peer, message); Branch (1818:21): [True: 37, False: 0]
|
1819 | 37 | return; |
1820 | 37 | } |
1821 | 89 | break; |
1822 | 126 | } |
1823 | 89 | case BlockValidationResult::BLOCK_INVALID_HEADER: Branch (1823:5): [True: 75, False: 6.20k]
|
1824 | 117 | case BlockValidationResult::BLOCK_INVALID_PREV: Branch (1824:5): [True: 42, False: 6.23k]
|
1825 | 117 | if (peer) Misbehaving(*peer, message); Branch (1825:13): [True: 117, False: 0]
|
1826 | 117 | return; |
1827 | | // Conflicting (but not necessarily invalid) data or different policy: |
1828 | 254 | case BlockValidationResult::BLOCK_MISSING_PREV: Branch (1828:5): [True: 254, False: 6.02k]
|
1829 | 254 | if (peer) Misbehaving(*peer, message); Branch (1829:13): [True: 254, False: 0]
|
1830 | 254 | return; |
1831 | 3.40k | case BlockValidationResult::BLOCK_TIME_FUTURE: Branch (1831:5): [True: 3.40k, False: 2.87k]
|
1832 | 3.40k | break; |
1833 | 6.27k | } |
1834 | 3.49k | if (message != "") { Branch (1834:9): [True: 1.39k, False: 2.10k]
|
1835 | 1.39k | LogDebug(BCLog::NET, "peer=%d: %s\n", nodeid, message); |
1836 | 1.39k | } |
1837 | 3.49k | } |
1838 | | |
1839 | | void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) |
1840 | 353k | { |
1841 | 353k | PeerRef peer{GetPeerRef(nodeid)}; |
1842 | 353k | switch (state.GetResult()) { Branch (1842:13): [True: 0, False: 353k]
|
1843 | 0 | case TxValidationResult::TX_RESULT_UNSET: Branch (1843:5): [True: 0, False: 353k]
|
1844 | 0 | break; |
1845 | | // The node is providing invalid data: |
1846 | 3.14k | case TxValidationResult::TX_CONSENSUS: Branch (1846:5): [True: 3.14k, False: 349k]
|
1847 | 3.14k | if (peer) Misbehaving(*peer, ""); Branch (1847:13): [True: 3.14k, False: 0]
|
1848 | 3.14k | return; |
1849 | | // Conflicting (but not necessarily invalid) data or different policy: |
1850 | 48 | case TxValidationResult::TX_INPUTS_NOT_STANDARD: Branch (1850:5): [True: 48, False: 353k]
|
1851 | 11.7k | case TxValidationResult::TX_NOT_STANDARD: Branch (1851:5): [True: 11.7k, False: 341k]
|
1852 | 335k | case TxValidationResult::TX_MISSING_INPUTS: Branch (1852:5): [True: 324k, False: 29.0k]
|
1853 | 336k | case TxValidationResult::TX_PREMATURE_SPEND: Branch (1853:5): [True: 953, False: 352k]
|
1854 | 336k | case TxValidationResult::TX_WITNESS_MUTATED: Branch (1854:5): [True: 30, False: 353k]
|
1855 | 337k | case TxValidationResult::TX_WITNESS_STRIPPED: Branch (1855:5): [True: 982, False: 352k]
|
1856 | 337k | case TxValidationResult::TX_CONFLICT: Branch (1856:5): [True: 75, False: 353k]
|
1857 | 343k | case TxValidationResult::TX_MEMPOOL_POLICY: Branch (1857:5): [True: 5.77k, False: 347k]
|
1858 | 343k | case TxValidationResult::TX_NO_MEMPOOL: Branch (1858:5): [True: 0, False: 353k]
|
1859 | 349k | case TxValidationResult::TX_RECONSIDERABLE: Branch (1859:5): [True: 6.25k, False: 346k]
|
1860 | 349k | case TxValidationResult::TX_UNKNOWN: Branch (1860:5): [True: 0, False: 353k]
|
1861 | 349k | break; |
1862 | 353k | } |
1863 | 353k | } |
1864 | | |
1865 | | bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) |
1866 | 23.3k | { |
1867 | 23.3k | AssertLockHeld(cs_main); |
1868 | 23.3k | if (m_chainman.ActiveChain().Contains(pindex)) return true; Branch (1868:9): [True: 4.72k, False: 18.6k]
|
1869 | 18.6k | return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) && Branch (1869:12): [True: 1.39k, False: 17.2k]
Branch (1869:52): [True: 1.39k, False: 0]
|
1870 | 18.6k | (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) && Branch (1870:12): [True: 589, False: 807]
|
1871 | 18.6k | (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT); Branch (1871:12): [True: 589, False: 0]
|
1872 | 23.3k | } |
1873 | | |
1874 | | std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index) |
1875 | 0 | { |
1876 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ..."; Branch (1876:9): [True: 0, False: 0]
|
1877 | | |
1878 | | // Ensure this peer exists and hasn't been disconnected |
1879 | 0 | PeerRef peer = GetPeerRef(peer_id); |
1880 | 0 | if (peer == nullptr) return "Peer does not exist"; Branch (1880:9): [True: 0, False: 0]
|
1881 | | |
1882 | | // Ignore pre-segwit peers |
1883 | 0 | if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer"; Branch (1883:9): [True: 0, False: 0]
|
1884 | | |
1885 | 0 | LOCK(cs_main); |
1886 | | |
1887 | | // Forget about all prior requests |
1888 | 0 | RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt); |
1889 | | |
1890 | | // Mark block as in-flight |
1891 | 0 | if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer"; Branch (1891:9): [True: 0, False: 0]
|
1892 | | |
1893 | | // Construct message to request the block |
1894 | 0 | const uint256& hash{block_index.GetBlockHash()}; |
1895 | 0 | std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)}; |
1896 | | |
1897 | | // Send block request message to the peer |
1898 | 0 | bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) { |
1899 | 0 | this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs); |
1900 | 0 | return true; |
1901 | 0 | }); |
1902 | |
|
1903 | 0 | if (!success) return "Peer not fully connected"; Branch (1903:9): [True: 0, False: 0]
|
1904 | | |
1905 | 0 | LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n", |
1906 | 0 | hash.ToString(), peer_id); |
1907 | 0 | return std::nullopt; |
1908 | 0 | } |
1909 | | |
1910 | | std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman, |
1911 | | BanMan* banman, ChainstateManager& chainman, |
1912 | | CTxMemPool& pool, node::Warnings& warnings, Options opts) |
1913 | 11.0k | { |
1914 | 11.0k | return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts); |
1915 | 11.0k | } |
1916 | | |
1917 | | PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, |
1918 | | BanMan* banman, ChainstateManager& chainman, |
1919 | | CTxMemPool& pool, node::Warnings& warnings, Options opts) |
1920 | 11.0k | : m_rng{opts.deterministic_rng}, |
1921 | 11.0k | m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng}, |
1922 | 11.0k | m_chainparams(chainman.GetParams()), |
1923 | 11.0k | m_connman(connman), |
1924 | 11.0k | m_addrman(addrman), |
1925 | 11.0k | m_banman(banman), |
1926 | 11.0k | m_chainman(chainman), |
1927 | 11.0k | m_mempool(pool), |
1928 | 11.0k | m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.max_orphan_txs, opts.deterministic_rng}), |
1929 | 11.0k | m_warnings{warnings}, |
1930 | 11.0k | m_opts{opts} |
1931 | 11.0k | { |
1932 | | // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation. |
1933 | | // This argument can go away after Erlay support is complete. |
1934 | 11.0k | if (opts.reconcile_txs) { Branch (1934:9): [True: 11.0k, False: 0]
|
1935 | 11.0k | m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION); |
1936 | 11.0k | } |
1937 | 11.0k | } |
1938 | | |
1939 | | void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) |
1940 | 11.0k | { |
1941 | | // Stale tip checking and peer eviction are on two different timers, but we |
1942 | | // don't want them to get out of sync due to drift in the scheduler, so we |
1943 | | // combine them in one function and schedule at the quicker (peer-eviction) |
1944 | | // timer. |
1945 | 11.0k | static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer"); |
1946 | 11.0k | scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL}); |
1947 | | |
1948 | | // schedule next run for 10-15 minutes in the future |
1949 | 11.0k | const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); |
1950 | 11.0k | scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); |
1951 | 11.0k | } |
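
The 10-15 minute rebroadcast delay above combines a fixed base with uniform random jitter so periodic work from different nodes doesn't synchronize. A sketch assuming std::mt19937_64 in place of FastRandomContext:

    #include <chrono>
    #include <iostream>
    #include <random>

    std::chrono::milliseconds NextBroadcastDelay(std::mt19937_64& rng)
    {
        using namespace std::chrono;
        const auto base = duration_cast<milliseconds>(minutes{10});
        const auto span = duration_cast<milliseconds>(minutes{5});
        std::uniform_int_distribution<int64_t> dist(0, span.count() - 1);
        return base + milliseconds{dist(rng)}; // uniform in [10min, 15min)
    }

    int main()
    {
        std::mt19937_64 rng{std::random_device{}()};
        std::cout << NextBroadcastDelay(rng).count() << "ms\n"; // in [600000, 900000)
    }
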
1952 | | |
1953 | | void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd) |
1954 | 2.23M | { |
1955 | | // Ensure mempool mutex was released, otherwise deadlock may occur if another thread holding |
1956 | | // m_tx_download_mutex waits on the mempool mutex. |
1957 | 2.23M | AssertLockNotHeld(m_mempool.cs); |
1958 | 2.23M | AssertLockNotHeld(m_tx_download_mutex); |
1959 | | |
1960 | 2.23M | if (!is_ibd) { Branch (1960:9): [True: 2.22M, False: 11.0k]
|
1961 | 2.22M | LOCK(m_tx_download_mutex); |
1962 | | // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due |
1963 | | // to a timelock. Reset the rejection filters to give those transactions another chance if we |
1964 | | // see them again. |
1965 | 2.22M | m_txdownloadman.ActiveTipChange(); |
1966 | 2.22M | } |
1967 | 2.23M | } |
1968 | | |
1969 | | /** |
1970 | | * Evict orphan txn pool entries based on a newly connected |
1971 | | * block, remember the recently confirmed transactions, and delete tracked |
1972 | | * announcements for them. Also save the time of the last tip update and |
1973 | | * possibly reduce the dynamic block stalling timeout. |
1974 | | */ |
1975 | | void PeerManagerImpl::BlockConnected( |
1976 | | ChainstateRole role, |
1977 | | const std::shared_ptr<const CBlock>& pblock, |
1978 | | const CBlockIndex* pindex) |
1979 | 2.23M | { |
1980 | | // Update this for all chainstate roles so that we don't mistakenly see peers |
1981 | | // helping us do background IBD as having a stale tip. |
1982 | 2.23M | m_last_tip_update = GetTime<std::chrono::seconds>(); |
1983 | | |
1984 | | // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value |
1985 | 2.23M | auto stalling_timeout = m_block_stalling_timeout.load(); |
1986 | 2.23M | Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT); |
1987 | 2.23M | if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { Branch (1987:9): [True: 0, False: 2.23M]
|
1988 | 0 | const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); |
1989 | 0 | if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { Branch (1989:13): [True: 0, False: 0]
|
1990 | 0 | LogDebug(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout)); |
1991 | 0 | } |
1992 | 0 | } |
1993 | | |
1994 | | // The following task can be skipped since we don't maintain a mempool for |
1995 | | // the ibd/background chainstate. |
1996 | 2.23M | if (role == ChainstateRole::BACKGROUND) { Branch (1996:9): [True: 0, False: 2.23M]
|
1997 | 0 | return; |
1998 | 0 | } |
1999 | 2.23M | LOCK(m_tx_download_mutex); |
2000 | 2.23M | m_txdownloadman.BlockConnected(pblock); |
2001 | 2.23M | } |
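
The decay above multiplies the stalling timeout by 0.85 per connected block, clamped at the default, and uses compare_exchange so it never clobbers a concurrent update. A toy version under those assumptions (the 2s default here is illustrative):

    #include <algorithm>
    #include <atomic>
    #include <chrono>
    #include <iostream>

    using namespace std::chrono_literals;

    constexpr auto DEFAULT_TIMEOUT = 2s; // illustrative stand-in for the real constant

    void DecayTimeout(std::atomic<std::chrono::seconds>& timeout)
    {
        auto cur = timeout.load();
        if (cur == DEFAULT_TIMEOUT) return;
        const auto next = std::max(
            std::chrono::duration_cast<std::chrono::seconds>(cur * 0.85), DEFAULT_TIMEOUT);
        // If another thread changed the value meanwhile, simply skip this decay.
        timeout.compare_exchange_strong(cur, next);
    }

    int main()
    {
        std::atomic<std::chrono::seconds> t{64s}; // as if doubled several times
        for (int i = 0; i < 30; ++i) DecayTimeout(t);
        std::cout << t.load().count() << "s\n"; // decayed back to 2s
    }
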
2002 | | |
2003 | | void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) |
2004 | 3.89k | { |
2005 | 3.89k | LOCK(m_tx_download_mutex); |
2006 | 3.89k | m_txdownloadman.BlockDisconnected(); |
2007 | 3.89k | } |
2008 | | |
2009 | | /** |
2010 | | * Maintain state about the best-seen block and fast-announce a compact block |
2011 | | * to compatible peers. |
2012 | | */ |
2013 | | void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) |
2014 | 2.22M | { |
2015 | 2.22M | auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64()); |
2016 | | |
2017 | 2.22M | LOCK(cs_main); |
2018 | | |
2019 | 2.22M | if (pindex->nHeight <= m_highest_fast_announce) Branch (2019:9): [True: 143, False: 2.22M]
|
2020 | 143 | return; |
2021 | 2.22M | m_highest_fast_announce = pindex->nHeight; |
2022 | | |
2023 | 2.22M | if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return; Branch (2023:9): [True: 0, False: 2.22M]
|
2024 | | |
2025 | 2.22M | uint256 hashBlock(pblock->GetHash()); |
2026 | 2.22M | const std::shared_future<CSerializedNetMsg> lazy_ser{ |
2027 | 2.22M | std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })}; |
2028 | | |
2029 | 2.22M | { |
2030 | 2.22M | auto most_recent_block_txs = std::make_unique<std::map<uint256, CTransactionRef>>(); |
2031 | 2.24M | for (const auto& tx : pblock->vtx) { Branch (2031:29): [True: 2.24M, False: 2.22M]
|
2032 | 2.24M | most_recent_block_txs->emplace(tx->GetHash(), tx); |
2033 | 2.24M | most_recent_block_txs->emplace(tx->GetWitnessHash(), tx); |
2034 | 2.24M | } |
2035 | | |
2036 | 2.22M | LOCK(m_most_recent_block_mutex); |
2037 | 2.22M | m_most_recent_block_hash = hashBlock; |
2038 | 2.22M | m_most_recent_block = pblock; |
2039 | 2.22M | m_most_recent_compact_block = pcmpctblock; |
2040 | 2.22M | m_most_recent_block_txs = std::move(most_recent_block_txs); |
2041 | 2.22M | } |
2042 | | |
2043 | 17.7M | m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
2044 | 17.7M | AssertLockHeld(::cs_main); |
2045 | | |
2046 | 17.7M | if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) Branch (2046:13): [True: 0, False: 17.7M]
Branch (2046:70): [True: 0, False: 17.7M]
|
2047 | 0 | return; |
2048 | 17.7M | ProcessBlockAvailability(pnode->GetId()); |
2049 | 17.7M | CNodeState &state = *State(pnode->GetId()); |
2050 | | // If the peer has, or we announced to them the previous block already, |
2051 | | // but we don't think they have this one, go ahead and announce it |
2052 | 17.7M | if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { Branch (2052:13): [True: 8.84M, False: 8.89M]
Branch (2052:49): [True: 8.84M, False: 940]
Branch (2052:83): [True: 7.57k, False: 8.84M]
|
2053 | | |
2054 | 7.57k | LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock", |
2055 | 7.57k | hashBlock.ToString(), pnode->GetId()); |
2056 | | |
2057 | 7.57k | const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()}; |
2058 | 7.57k | PushMessage(*pnode, ser_cmpctblock.Copy()); |
2059 | 7.57k | state.pindexBestHeaderSent = pindex; |
2060 | 7.57k | } |
2061 | 17.7M | }); |
2062 | 2.22M | } |
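
lazy_ser above serializes the compact block at most once, and only if at least one peer actually qualifies for fast announcement. A toy version of the deferred-task/shared_future pattern, with a string standing in for the serialized message:

    #include <future>
    #include <iostream>
    #include <string>

    int main()
    {
        // Hypothetical stand-in for the expensive serialization: it should run
        // at most once, and not at all if no peer wants the message.
        auto serialize = [] {
            std::cout << "serialized once\n";
            return std::string{"wire-bytes"};
        };
        const std::shared_future<std::string> lazy{
            std::async(std::launch::deferred, serialize)};

        for (int peer = 0; peer < 3; ++peer) {
            const bool wants_fast_announce = (peer != 1);
            if (!wants_fast_announce) continue;
            // The first get() runs the deferred task; later calls reuse the result.
            std::cout << "peer " << peer << " gets " << lazy.get() << '\n';
        }
    }
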
2063 | | |
2064 | | /** |
2065 | | * Update our best height and announce any block hashes which weren't previously |
2066 | | * in m_chainman.ActiveChain() to our peers. |
2067 | | */ |
2068 | | void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) |
2069 | 2.23M | { |
2070 | 2.23M | SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()}); |
2071 | | |
2072 | | // Don't relay inventory during initial block download. |
2073 | 2.23M | if (fInitialDownload) return; Branch (2073:9): [True: 11.0k, False: 2.22M]
|
2074 | | |
2075 | | // Find the hashes of all blocks that weren't previously in the best chain. |
2076 | 2.22M | std::vector<uint256> vHashes; |
2077 | 2.22M | const CBlockIndex *pindexToAnnounce = pindexNew; |
2078 | 4.44M | while (pindexToAnnounce != pindexFork) { Branch (2078:12): [True: 2.22M, False: 2.22M]
|
2079 | 2.22M | vHashes.push_back(pindexToAnnounce->GetBlockHash()); |
2080 | 2.22M | pindexToAnnounce = pindexToAnnounce->pprev; |
2081 | 2.22M | if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { Branch (2081:13): [True: 0, False: 2.22M]
|
2082 | | // Limit announcements in case of a huge reorganization. |
2083 | | // Rely on the peer's synchronization mechanism in that case. |
2084 | 0 | break; |
2085 | 0 | } |
2086 | 2.22M | } |
2087 | | |
2088 | 2.22M | { |
2089 | 2.22M | LOCK(m_peer_mutex); |
2090 | 17.7M | for (auto& it : m_peer_map) { Branch (2090:23): [True: 17.7M, False: 2.22M]
|
2091 | 17.7M | Peer& peer = *it.second; |
2092 | 17.7M | LOCK(peer.m_block_inv_mutex); |
2093 | 17.7M | for (const uint256& hash : vHashes | std::views::reverse) { Branch (2093:38): [True: 17.7M, False: 17.7M]
|
2094 | 17.7M | peer.m_blocks_for_headers_relay.push_back(hash); |
2095 | 17.7M | } |
2096 | 17.7M | } |
2097 | 2.22M | } |
2098 | | |
2099 | 2.22M | m_connman.WakeMessageHandler(); |
2100 | 2.22M | } |
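
The walk above collects hashes from the new tip back to the fork point, newest first, capped so a huge reorg falls back to the peers' own sync. A standalone sketch with a minimal linked index in place of CBlockIndex:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Index { uint64_t hash; const Index* pprev; };

    std::vector<uint64_t> HashesToAnnounce(const Index* new_tip, const Index* fork,
                                           std::size_t max_announce)
    {
        std::vector<uint64_t> hashes;
        for (const Index* p = new_tip; p != fork; p = p->pprev) {
            hashes.push_back(p->hash);
            if (hashes.size() == max_announce) break; // rely on normal sync for the rest
        }
        return hashes; // callers queue these in reverse, oldest first
    }

    int main()
    {
        Index a{1, nullptr}, b{2, &a}, c{3, &b};
        const auto v = HashesToAnnounce(&c, &a, 8); // {3, 2}
        return v.size() == 2 ? 0 : 1;
    }
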
2101 | | |
2102 | | /** |
2103 | | * Handle invalid block rejection and consequent peer discouragement, maintain which |
2104 | | * peers announce compact blocks. |
2105 | | */ |
2106 | | void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state) |
2107 | 2.24M | { |
2108 | 2.24M | LOCK(cs_main); |
2109 | | |
2110 | 2.24M | const uint256 hash(block.GetHash()); |
2111 | 2.24M | std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash); |
2112 | | |
2113 | | // If the block failed validation, we know where it came from and we're still connected |
2114 | | // to that peer, maybe punish. |
2115 | 2.24M | if (state.IsInvalid() && Branch (2115:9): [True: 4.97k, False: 2.23M]
Branch (2115:9): [True: 4.81k, False: 2.23M]
|
2116 | 2.24M | it != mapBlockSource.end() && Branch (2116:9): [True: 4.85k, False: 125]
|
2117 | 2.24M | State(it->second.first)) { Branch (2117:9): [True: 4.81k, False: 40]
|
2118 | 4.81k | MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second); |
2119 | 4.81k | } |
2120 | | // Check that: |
2121 | | // 1. The block is valid |
2122 | | // 2. We're not in initial block download |
2123 | | // 3. This is currently the best block we're aware of. We haven't updated |
2124 | | // the tip yet so we have no way to check this directly here. Instead we |
2125 | | // just check that there are currently no other blocks in flight. |
2126 | 2.23M | else if (state.IsValid() && Branch (2126:14): [True: 2.23M, False: 165]
|
2127 | 2.23M | !m_chainman.IsInitialBlockDownload() && Branch (2127:14): [True: 2.22M, False: 11.0k]
|
2128 | 2.23M | mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { Branch (2128:14): [True: 2.22M, False: 1.42k]
|
2129 | 2.22M | if (it != mapBlockSource.end()) { Branch (2129:13): [True: 2.22M, False: 1.80k]
|
2130 | 2.22M | MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first); |
2131 | 2.22M | } |
2132 | 2.22M | } |
2133 | 2.24M | if (it != mapBlockSource.end()) Branch (2133:9): [True: 2.22M, False: 13.3k]
|
2134 | 2.22M | mapBlockSource.erase(it); |
2135 | 2.24M | } |
2136 | | |
2137 | | ////////////////////////////////////////////////////////////////////////////// |
2138 | | // |
2139 | | // Messages |
2140 | | // |
2141 | | |
2142 | | bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash) |
2143 | 91.6k | { |
2144 | 91.6k | return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr; |
2145 | 91.6k | } |
2146 | | |
2147 | | void PeerManagerImpl::SendPings() |
2148 | 0 | { |
2149 | 0 | LOCK(m_peer_mutex); |
2150 | 0 | for(auto& it : m_peer_map) it.second->m_ping_queued = true; Branch (2150:18): [True: 0, False: 0]
|
2151 | 0 | } |
2152 | | |
2153 | | void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid) |
2154 | 38.1k | { |
2155 | 38.1k | LOCK(m_peer_mutex); |
2156 | 298k | for(auto& it : m_peer_map) { Branch (2156:18): [True: 298k, False: 38.1k]
|
2157 | 298k | Peer& peer = *it.second; |
2158 | 298k | auto tx_relay = peer.GetTxRelay(); |
2159 | 298k | if (!tx_relay) continue; Branch (2159:13): [True: 0, False: 298k]
|
2160 | | |
2161 | 298k | LOCK(tx_relay->m_tx_inventory_mutex); |
2162 | | // Only queue transactions for announcement once the version handshake |
2163 | | // is completed. The time of arrival for these transactions is |
2164 | | // otherwise at risk of leaking to a spy, if the spy is able to |
2165 | | // distinguish transactions received during the handshake from the rest |
2166 | | // in the announcement. |
2167 | 298k | if (tx_relay->m_next_inv_send_time == 0s) continue; Branch (2167:13): [True: 0, False: 298k]
|
2168 | | |
2169 | 298k | const uint256& hash{peer.m_wtxid_relay ? wtxid : txid}; Branch (2169:29): [True: 298k, False: 0]
|
2170 | 298k | if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) { Branch (2170:13): [True: 259k, False: 38.9k]
|
2171 | 259k | tx_relay->m_tx_inventory_to_send.insert(hash); |
2172 | 259k | } |
2173 | 298k | }; |
2174 | 38.1k | } |
2175 | | |
2176 | | void PeerManagerImpl::RelayAddress(NodeId originator, |
2177 | | const CAddress& addr, |
2178 | | bool fReachable) |
2179 | 69 | { |
2180 | | // We choose the same nodes within a given 24h window (if the list of connected |
2181 | | // nodes does not change) and we don't relay to nodes that already know an |
2182 | | // address. So within 24h we will likely relay a given address once. This is to |
2183 | | // prevent a peer from unjustly giving their address better propagation by sending |
2184 | | // it to us repeatedly. |
2185 | | |
2186 | 69 | if (!fReachable && !addr.IsRelayable()) return; Branch (2186:9): [True: 0, False: 69]
Branch (2186:24): [True: 0, False: 0]
|
2187 | | |
2188 | | // Relay to a limited number of other nodes |
2189 | | // Use deterministic randomness to send to the same nodes for 24 hours |
2190 | | // at a time so the m_addr_knowns of the chosen nodes prevent repeats |
2191 | 69 | const uint64_t hash_addr{CServiceHash(0, 0)(addr)}; |
2192 | 69 | const auto current_time{GetTime<std::chrono::seconds>()}; |
2193 | | // Adding address hash makes exact rotation time different per address, while preserving periodicity. |
2194 | 69 | const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)}; |
2195 | 69 | const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) |
2196 | 69 | .Write(hash_addr) |
2197 | 69 | .Write(time_addr)}; |
2198 | | |
2199 | | // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers. |
2200 | 69 | unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1; Branch (2200:33): [True: 69, False: 0]
Branch (2200:47): [True: 0, False: 0]
|
2201 | | |
2202 | 69 | std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}}; |
2203 | 69 | assert(nRelayNodes <= best.size()); Branch (2203:5): [True: 69, False: 0]
|
2204 | | |
2205 | 69 | LOCK(m_peer_mutex); |
2206 | | |
2207 | 539 | for (auto& [id, peer] : m_peer_map) { Branch (2207:27): [True: 539, False: 69]
|
2208 | 539 | if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) { Branch (2208:13): [True: 373, False: 166]
Branch (2208:43): [True: 304, False: 69]
Branch (2208:63): [True: 304, False: 0]
|
2209 | 304 | uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize(); |
2210 | 589 | for (unsigned int i = 0; i < nRelayNodes; i++) { Branch (2210:38): [True: 458, False: 131]
|
2211 | 458 | if (hashKey > best[i].first) { Branch (2211:22): [True: 173, False: 285]
|
2212 | 173 | std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); |
2213 | 173 | best[i] = std::make_pair(hashKey, peer.get()); |
2214 | 173 | break; |
2215 | 173 | } |
2216 | 458 | } |
2217 | 304 | } |
2218 | 539 | }; |
2219 | | |
2220 | 204 | for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { Branch (2220:30): [True: 138, False: 66]
Branch (2220:49): [True: 135, False: 3]
|
2221 | 135 | PushAddress(*best[i].second, addr); |
2222 | 135 | } |
2223 | 69 | } |
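
Target selection above is deterministic per (address, 24h time bucket): each candidate peer gets a keyed-hash score and the top entries win. A sketch using std::hash in place of the keyed SipHash (the real code also mixes in a node-private randomizer so peers can't predict the choice); it assumes n_targets <= 2:

    #include <array>
    #include <cstdint>
    #include <functional>
    #include <string>
    #include <utility>
    #include <vector>

    std::vector<int> PickRelayTargets(const std::vector<int>& peer_ids,
                                      const std::string& addr, uint64_t day_bucket,
                                      unsigned n_targets)
    {
        std::array<std::pair<uint64_t, int>, 2> best{{{0, -1}, {0, -1}}};
        for (int id : peer_ids) {
            const uint64_t score = std::hash<std::string>{}(
                addr + '|' + std::to_string(day_bucket) + '|' + std::to_string(id));
            for (unsigned i = 0; i < n_targets; ++i) {
                if (score > best[i].first) {
                    // Shift lower-ranked entries down, then insert.
                    for (unsigned j = n_targets - 1; j > i; --j) best[j] = best[j - 1];
                    best[i] = {score, id};
                    break;
                }
            }
        }
        std::vector<int> out;
        for (unsigned i = 0; i < n_targets; ++i)
            if (best[i].second != -1) out.push_back(best[i].second);
        return out;
    }

    int main()
    {
        const auto targets = PickRelayTargets({1, 2, 3, 4}, "203.0.113.5:8333", 19876, 2);
        return targets.empty() ? 1 : 0;
    }
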
2224 | | |
2225 | | void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) |
2226 | 24.9k | { |
2227 | 24.9k | std::shared_ptr<const CBlock> a_recent_block; |
2228 | 24.9k | std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block; |
2229 | 24.9k | { |
2230 | 24.9k | LOCK(m_most_recent_block_mutex); |
2231 | 24.9k | a_recent_block = m_most_recent_block; |
2232 | 24.9k | a_recent_compact_block = m_most_recent_compact_block; |
2233 | 24.9k | } |
2234 | | |
2235 | 24.9k | bool need_activate_chain = false; |
2236 | 24.9k | { |
2237 | 24.9k | LOCK(cs_main); |
2238 | 24.9k | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash); |
2239 | 24.9k | if (pindex) { Branch (2239:13): [True: 23.3k, False: 1.61k]
|
2240 | 23.3k | if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) && Branch (2240:17): [True: 9.04k, False: 14.3k]
Branch (2240:46): [True: 2.91k, False: 6.12k]
|
2241 | 23.3k | pindex->IsValid(BLOCK_VALID_TREE)) { Branch (2241:21): [True: 2.74k, False: 172]
|
2242 | | // If we have the block and all of its parents, but have not yet validated it, |
2243 | | // we might be in the middle of connecting it (i.e. in the unlock of cs_main |
2244 | | // before ActivateBestChain but after AcceptBlock). |
2245 | | // In this case, we need to run ActivateBestChain prior to checking the relay |
2246 | | // conditions below. |
2247 | 2.74k | need_activate_chain = true; |
2248 | 2.74k | } |
2249 | 23.3k | } |
2250 | 24.9k | } // release cs_main before calling ActivateBestChain |
2251 | 24.9k | if (need_activate_chain) { Branch (2251:9): [True: 2.74k, False: 22.2k]
|
2252 | 2.74k | BlockValidationState state; |
2253 | 2.74k | if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { Branch (2253:13): [True: 0, False: 2.74k]
|
2254 | 0 | LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); |
2255 | 0 | } |
2256 | 2.74k | } |
2257 | | |
2258 | 24.9k | const CBlockIndex* pindex{nullptr}; |
2259 | 24.9k | const CBlockIndex* tip{nullptr}; |
2260 | 24.9k | bool can_direct_fetch{false}; |
2261 | 24.9k | FlatFilePos block_pos{}; |
2262 | 24.9k | { |
2263 | 24.9k | LOCK(cs_main); |
2264 | 24.9k | pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash); |
2265 | 24.9k | if (!pindex) { Branch (2265:13): [True: 1.61k, False: 23.3k]
|
2266 | 1.61k | return; |
2267 | 1.61k | } |
2268 | 23.3k | if (!BlockRequestAllowed(pindex)) { Branch (2268:13): [True: 18.0k, False: 5.31k]
|
2269 | 18.0k | LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId()); |
2270 | 18.0k | return; |
2271 | 18.0k | } |
2272 | | // disconnect node in case we have reached the outbound limit for serving historical blocks |
2273 | 5.31k | if (m_connman.OutboundTargetReached(true) && Branch (2273:13): [True: 0, False: 5.31k]
|
2274 | 5.31k | (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) && Branch (2274:15): [True: 0, False: 0]
Branch (2274:56): [True: 0, False: 0]
Branch (2274:151): [True: 0, False: 0]
|
2275 | 5.31k | !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target Branch (2275:13): [True: 0, False: 0]
|
2276 | 5.31k | ) { |
2277 | 0 | LogDebug(BCLog::NET, "historical block serving limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
2278 | 0 | pfrom.fDisconnect = true; |
2279 | 0 | return; |
2280 | 0 | } |
2281 | 5.31k | tip = m_chainman.ActiveChain().Tip(); |
2282 | | // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold |
2283 | 5.31k | if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && ( Branch (2283:13): [True: 5.31k, False: 0]
|
2284 | 5.31k | (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) ) Branch (2284:18): [True: 5.31k, False: 0]
Branch (2284:92): [True: 0, False: 5.31k]
Branch (2284:150): [True: 0, False: 0]
|
2285 | 5.31k | )) { |
2286 | 0 | LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
2287 | | // Disconnect node and prevent it from stalling (it would otherwise wait for the missing block) |
2288 | 0 | pfrom.fDisconnect = true; |
2289 | 0 | return; |
2290 | 0 | } |
2291 | | // Pruned nodes may have deleted the block, so check whether |
2292 | | // it's available before trying to send. |
2293 | 5.31k | if (!(pindex->nStatus & BLOCK_HAVE_DATA)) { Branch (2293:13): [True: 0, False: 5.31k]
|
2294 | 0 | return; |
2295 | 0 | } |
2296 | 5.31k | can_direct_fetch = CanDirectFetch(); |
2297 | 5.31k | block_pos = pindex->GetBlockPos(); |
2298 | 5.31k | } |
2299 | | |
2300 | 0 | std::shared_ptr<const CBlock> pblock; |
2301 | 5.31k | if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) { Branch (2301:9): [True: 5.31k, False: 0]
Branch (2301:9): [True: 1.22k, False: 4.09k]
Branch (2301:27): [True: 1.22k, False: 4.09k]
|
2302 | 1.22k | pblock = a_recent_block; |
2303 | 4.09k | } else if (inv.IsMsgWitnessBlk()) { Branch (2303:16): [True: 2.00k, False: 2.08k]
|
2304 | | // Fast-path: in this case it is possible to serve the block directly from disk, |
2305 | | // as the network format matches the format on disk |
2306 | 2.00k | std::vector<uint8_t> block_data; |
2307 | 2.00k | if (!m_chainman.m_blockman.ReadRawBlock(block_data, block_pos)) { Branch (2307:13): [True: 0, False: 2.00k]
|
2308 | 0 | if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { |
2309 | 0 | LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
2310 | 0 | } else { |
2311 | 0 | LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
2312 | 0 | } |
2313 | 0 | pfrom.fDisconnect = true; |
2314 | 0 | return; |
2315 | 0 | } |
2316 | 2.00k | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, std::span{block_data}); |
2317 | | // Don't set pblock as we've sent the block |
2318 | 2.08k | } else { |
2319 | | // Send block from disk |
2320 | 2.08k | std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>(); |
2321 | 2.08k | if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos)) { Branch (2321:13): [True: 0, False: 2.08k]
|
2322 | 0 | if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { |
2323 | 0 | LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
2324 | 0 | } else { |
2325 | 0 | LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
2326 | 0 | } |
2327 | 0 | pfrom.fDisconnect = true; |
2328 | 0 | return; |
2329 | 0 | } |
2330 | 2.08k | pblock = pblockRead; |
2331 | 2.08k | } |
2332 | 5.31k | if (pblock) { Branch (2332:9): [True: 3.31k, False: 2.00k]
|
2333 | 3.31k | if (inv.IsMsgBlk()) { Branch (2333:13): [True: 2.80k, False: 502]
|
2334 | 2.80k | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock)); |
2335 | 2.80k | } else if (inv.IsMsgWitnessBlk()) { Branch (2335:20): [True: 502, False: 0]
|
2336 | 502 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock)); |
2337 | 502 | } else if (inv.IsMsgFilteredBlk()) { Branch (2337:20): [True: 0, False: 0]
|
2338 | 0 | bool sendMerkleBlock = false; |
2339 | 0 | CMerkleBlock merkleBlock; |
2340 | 0 | if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { Branch (2340:52): [True: 0, False: 0]
|
2341 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
2342 | 0 | if (tx_relay->m_bloom_filter) { Branch (2342:21): [True: 0, False: 0]
|
2343 | 0 | sendMerkleBlock = true; |
2344 | 0 | merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter); |
2345 | 0 | } |
2346 | 0 | } |
2347 | 0 | if (sendMerkleBlock) { Branch (2347:17): [True: 0, False: 0]
|
2348 | 0 | MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock); |
2349 | | // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see |
2350 | | // This avoids hurting performance by pointlessly requiring a round-trip |
2351 | | // Note that there is currently no way for a node to request any single transactions we didn't send here - |
2352 | | // they must either disconnect and retry or request the full block. |
2353 | | // Thus, the protocol spec allows us to provide duplicate txns here; |
2354 | | // however, we MUST always provide at least what the remote peer needs. |
2355 | 0 | typedef std::pair<unsigned int, uint256> PairType; |
2356 | 0 | for (PairType& pair : merkleBlock.vMatchedTxn) Branch (2356:37): [True: 0, False: 0]
|
2357 | 0 | MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[pair.first])); |
2358 | 0 | } |
2359 | | // else |
2360 | | // no response |
2361 | 0 | } else if (inv.IsMsgCmpctBlk()) { Branch (2361:20): [True: 0, False: 0]
|
2362 | | // If a peer is asking for old blocks, we're almost guaranteed |
2363 | | // they won't have a useful mempool to match against a compact block, |
2364 | | // and we don't feel like constructing the object for them, so |
2365 | | // instead we respond with the full, non-compact block. |
2366 | 0 | if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) { Branch (2366:17): [True: 0, False: 0]
Branch (2366:37): [True: 0, False: 0]
|
2367 | 0 | if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) { Branch (2367:21): [True: 0, False: 0]
Branch (2367:21): [True: 0, False: 0]
Branch (2367:47): [True: 0, False: 0]
|
2368 | 0 | MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block); |
2369 | 0 | } else { |
2370 | 0 | CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()}; |
2371 | 0 | MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock); |
2372 | 0 | } |
2373 | 0 | } else { |
2374 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock)); |
2375 | 0 | } |
2376 | 0 | } |
2377 | 3.31k | } |
2378 | | |
2379 | 5.31k | { |
2380 | 5.31k | LOCK(peer.m_block_inv_mutex); |
2381 | | // Trigger the peer node to send a getblocks request for the next batch of inventory |
2382 | 5.31k | if (inv.hash == peer.m_continuation_block) { Branch (2382:13): [True: 0, False: 5.31k]
|
2383 | | // Send immediately. This must send even if redundant, |
2384 | | // and we want it right after the last block so they don't |
2385 | | // wait for other stuff first. |
2386 | 0 | std::vector<CInv> vInv; |
2387 | 0 | vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash()); |
2388 | 0 | MakeAndPushMessage(pfrom, NetMsgType::INV, vInv); |
2389 | 0 | peer.m_continuation_block.SetNull(); |
2390 | 0 | } |
2391 | 5.31k | } |
2392 | 5.31k | } |
2393 | | |
2394 | | CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) |
2395 | 37.8k | { |
2396 | | // If a tx was in the mempool prior to the last INV for this peer, permit the request. |
2397 | 37.8k | auto txinfo = m_mempool.info_for_relay(gtxid, tx_relay.m_last_inv_sequence); |
2398 | 37.8k | if (txinfo.tx) { Branch (2398:9): [True: 648, False: 37.2k]
|
2399 | 648 | return std::move(txinfo.tx); |
2400 | 648 | } |
2401 | | |
2402 | | // Or it might be from the most recent block |
2403 | 37.2k | { |
2404 | 37.2k | LOCK(m_most_recent_block_mutex); |
2405 | 37.2k | if (m_most_recent_block_txs != nullptr) { Branch (2405:13): [True: 37.2k, False: 0]
|
2406 | 37.2k | auto it = m_most_recent_block_txs->find(gtxid.GetHash()); |
2407 | 37.2k | if (it != m_most_recent_block_txs->end()) return it->second; Branch (2407:17): [True: 1.42k, False: 35.7k]
|
2408 | 37.2k | } |
2409 | 37.2k | } |
2410 | | |
2411 | 35.7k | return {}; |
2412 | 37.2k | } |
2413 | | |
2414 | | void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) |
2415 | 30.2k | { |
2416 | 30.2k | AssertLockNotHeld(cs_main); |
2417 | | |
2418 | 30.2k | auto tx_relay = peer.GetTxRelay(); |
2419 | | |
2420 | 30.2k | std::deque<CInv>::iterator it = peer.m_getdata_requests.begin(); |
2421 | 30.2k | std::vector<CInv> vNotFound; |
2422 | | |
2423 | | // Process as many TX items from the front of the getdata queue as |
2424 | | // possible, since they're common and it's efficient to batch process |
2425 | | // them. |
2426 | 68.1k | while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) { Branch (2426:12): [True: 62.9k, False: 5.20k]
Branch (2426:12): [True: 37.8k, False: 30.2k]
Branch (2426:51): [True: 37.8k, False: 25.0k]
|
2427 | 37.8k | if (interruptMsgProc) return; Branch (2427:13): [True: 0, False: 37.8k]
|
2428 | | // The send buffer provides backpressure. If there's no space in |
2429 | | // the buffer, pause processing until the next call. |
2430 | 37.8k | if (pfrom.fPauseSend) break; Branch (2430:13): [True: 0, False: 37.8k]
|
2431 | | |
2432 | 37.8k | const CInv &inv = *it++; |
2433 | | |
2434 | 37.8k | if (tx_relay == nullptr) { Branch (2434:13): [True: 0, False: 37.8k]
|
2435 | | // Ignore GETDATA requests for transactions from block-relay-only |
2436 | | // peers and peers that asked us not to announce transactions. |
2437 | 0 | continue; |
2438 | 0 | } |
2439 | | |
2440 | 37.8k | CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv)); |
2441 | 37.8k | if (tx) { Branch (2441:13): [True: 2.07k, False: 35.7k]
|
2442 | | // WTX and WITNESS_TX imply we serialize with witness |
2443 | 2.07k | const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS); Branch (2443:46): [True: 575, False: 1.50k]
|
2444 | 2.07k | MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx)); |
2445 | 2.07k | m_mempool.RemoveUnbroadcastTx(tx->GetHash()); |
2446 | 35.7k | } else { |
2447 | 35.7k | vNotFound.push_back(inv); |
2448 | 35.7k | } |
2449 | 37.8k | } |
2450 | | |
2451 | | // Only process one BLOCK item per call, since they're uncommon and can be |
2452 | | // expensive to process. |
2453 | 30.2k | if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) { Branch (2453:9): [True: 25.0k, False: 5.20k]
Branch (2453:9): [True: 25.0k, False: 5.20k]
Branch (2453:48): [True: 25.0k, False: 0]
|
2454 | 25.0k | const CInv &inv = *it++; |
2455 | 25.0k | if (inv.IsGenBlkMsg()) { Branch (2455:13): [True: 24.9k, False: 97]
|
2456 | 24.9k | ProcessGetBlockData(pfrom, peer, inv); |
2457 | 24.9k | } |
2458 | | // else: If the first item on the queue is an unknown type, we erase it |
2459 | | // and continue processing the queue on the next call. |
2460 | | // NOTE: previously we wouldn't do so, and a peer sending us a malformed GETDATA could |
2461 | | // prevent this loop from ever making progress, pinning the thread at 100% CPU. See |
2462 | | // https://bitcoincore.org/en/2024/07/03/disclose-getdata-cpu. |
2463 | 25.0k | } |
2464 | | |
2465 | 30.2k | peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it); |
2466 | | |
2467 | 30.2k | if (!vNotFound.empty()) { Branch (2467:9): [True: 5.71k, False: 24.5k]
|
2468 | | // Let the peer know that we didn't find what it asked for, so it doesn't |
2469 | | // have to wait around forever. |
2470 | | // SPV clients care about this message: it's needed when they are |
2471 | | // recursively walking the dependencies of relevant unconfirmed |
2472 | | // transactions. SPV clients want to do that because they want to know |
2473 | | // about (and store and rebroadcast and risk analyze) the dependencies |
2474 | | // of transactions relevant to them, without having to download the |
2475 | | // entire memory pool. |
2476 | | // Also, other nodes can use these messages to automatically request a |
2477 | | // transaction from some other peer that announced it, and stop |
2478 | | // waiting for us to respond. |
2479 | | // In normal operation, we often send NOTFOUND messages for parents of |
2480 | | // transactions that we relay; if a peer is missing a parent, they may |
2481 | | // assume we have them and request the parents from us. |
2482 | 5.71k | MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound); |
2483 | 5.71k | } |
2484 | 30.2k | } |
2485 | | |
2486 | | uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const |
2487 | 131k | { |
2488 | 131k | uint32_t nFetchFlags = 0; |
2489 | 131k | if (CanServeWitnesses(peer)) { Branch (2489:9): [True: 131k, False: 0]
|
2490 | 131k | nFetchFlags |= MSG_WITNESS_FLAG; |
2491 | 131k | } |
2492 | 131k | return nFetchFlags; |
2493 | 131k | } |
2494 | | |
2495 | | void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req) |
2496 | 0 | { |
2497 | 0 | BlockTransactions resp(req); |
2498 | 0 | for (size_t i = 0; i < req.indexes.size(); i++) { Branch (2498:24): [True: 0, False: 0]
|
2499 | 0 | if (req.indexes[i] >= block.vtx.size()) { Branch (2499:13): [True: 0, False: 0]
|
2500 | 0 | Misbehaving(peer, "getblocktxn with out-of-bounds tx indices"); |
2501 | 0 | return; |
2502 | 0 | } |
2503 | 0 | resp.txn[i] = block.vtx[req.indexes[i]]; |
2504 | 0 | } |
2505 | | |
2506 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp); |
2507 | 0 | } |
2508 | | |
2509 | | bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer) |
2510 | 21.4k | { |
2511 | | // Do these headers have proof-of-work matching what's claimed? |
2512 | 21.4k | if (!HasValidProofOfWork(headers, consensusParams)) { Branch (2512:9): [True: 16, False: 21.4k]
|
2513 | 16 | Misbehaving(peer, "header with invalid proof of work"); |
2514 | 16 | return false; |
2515 | 16 | } |
2516 | | |
2517 | | // Are these headers connected to each other? |
2518 | 21.4k | if (!CheckHeadersAreContinuous(headers)) { Branch (2518:9): [True: 8, False: 21.4k]
|
2519 | 8 | Misbehaving(peer, "non-continuous headers sequence"); |
2520 | 8 | return false; |
2521 | 8 | } |
2522 | 21.4k | return true; |
2523 | 21.4k | } |
2524 | | |
2525 | | arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() |
2526 | 2.26M | { |
2527 | 2.26M | arith_uint256 near_chaintip_work = 0; |
2528 | 2.26M | LOCK(cs_main); |
2529 | 2.26M | if (m_chainman.ActiveChain().Tip() != nullptr) { Branch (2529:9): [True: 2.26M, False: 0]
|
2530 | 2.26M | const CBlockIndex *tip = m_chainman.ActiveChain().Tip(); |
2531 | | // Use a 144 block buffer, so that we'll accept headers that fork from |
2532 | | // near our tip. |
2533 | 2.26M | near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork); |
2534 | 2.26M | } |
2535 | 2.26M | return std::max(near_chaintip_work, m_chainman.MinimumChainWork()); |
2536 | 2.26M | } |
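
The threshold above is the tip's chain work minus a 144-block buffer, floored at the configured minimum chain work. The same arithmetic with plain integers standing in for arith_uint256 (the concrete values are illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    uint64_t AntiDoSThreshold(uint64_t tip_chain_work, uint64_t tip_block_proof,
                              uint64_t minimum_chain_work)
    {
        // Clamp the buffer so the subtraction can't underflow on a young chain.
        const uint64_t buffer = std::min(144 * tip_block_proof, tip_chain_work);
        return std::max(tip_chain_work - buffer, minimum_chain_work);
    }

    int main()
    {
        // With a tip at work 1'000'000 and 100 work per block, chains forking
        // up to ~144 blocks deep (work >= 985'600) pass the threshold.
        std::cout << AntiDoSThreshold(1'000'000, 100, 50'000) << '\n';
    }
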
2537 | | |
2538 | | /** |
2539 | | * Special handling for unconnecting headers that might be part of a block |
2540 | | * announcement. |
2541 | | * |
2542 | | * We'll send a getheaders message in response to try to connect the chain. |
2543 | | */ |
2544 | | void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, |
2545 | | const std::vector<CBlockHeader>& headers) |
2546 | 707 | { |
2547 | | // Try to fill in the missing headers. |
2548 | 707 | const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)}; |
2549 | 707 | if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) { Branch (2549:9): [True: 548, False: 159]
|
2550 | 548 | LogDebug(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n", |
2551 | 548 | headers[0].GetHash().ToString(), |
2552 | 548 | headers[0].hashPrevBlock.ToString(), |
2553 | 548 | best_header->nHeight, |
2554 | 548 | pfrom.GetId()); |
2555 | 548 | } |
2556 | | |
2557 | | // Set hashLastUnknownBlock for this peer, so that if we |
2558 | | // eventually get the headers - even from a different peer - |
2559 | | // we can use this peer to download. |
2560 | 707 | WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash())); |
2561 | 707 | } |
2562 | | |
2563 | | bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const |
2564 | 21.4k | { |
2565 | 21.4k | uint256 hashLastBlock; |
2566 | 21.4k | for (const CBlockHeader& header : headers) { Branch (2566:37): [True: 21.4k, False: 21.4k]
|
2567 | 21.4k | if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { Branch (2567:13): [True: 8, False: 21.4k]
Branch (2567:40): [True: 8, False: 0]
|
2568 | 8 | return false; |
2569 | 8 | } |
2570 | 21.4k | hashLastBlock = header.GetHash(); |
2571 | 21.4k | } |
2572 | 21.4k | return true; |
2573 | 21.4k | } |
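
A standalone version of the continuity rule above: every header after the first must reference the hash of its predecessor. Toy headers with integer hashes replace CBlockHeader:

    #include <cstdint>
    #include <vector>

    struct Header { uint64_t hash; uint64_t prev; };

    bool AreContinuous(const std::vector<Header>& headers)
    {
        bool have_last{false};
        uint64_t last{0};
        for (const Header& h : headers) {
            if (have_last && h.prev != last) return false; // broken link
            last = h.hash;
            have_last = true;
        }
        return true;
    }

    int main()
    {
        const std::vector<Header> good{{10, 0}, {11, 10}, {12, 11}};
        const std::vector<Header> bad{{10, 0}, {12, 11}};
        return (AreContinuous(good) && !AreContinuous(bad)) ? 0 : 1;
    }
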
2574 | | |
2575 | | bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers) |
2576 | 21.4k | { |
2577 | 21.4k | if (peer.m_headers_sync) { Branch (2577:9): [True: 0, False: 21.4k]
|
2578 | 0 | auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result); |
2579 | | // If it is a valid continuation, we should treat the existing getheaders request as responded to. |
2580 | 0 | if (result.success) peer.m_last_getheaders_timestamp = {}; Branch (2580:13): [True: 0, False: 0]
|
2581 | 0 | if (result.request_more) { Branch (2581:13): [True: 0, False: 0]
|
2582 | 0 | auto locator = peer.m_headers_sync->NextHeadersRequestLocator(); |
2583 | | // If we were instructed to ask for a locator, it should not be empty. |
2584 | 0 | Assume(!locator.vHave.empty()); |
2585 | | // We can only be instructed to request more if processing was successful. |
2586 | 0 | Assume(result.success); |
2587 | 0 | if (!locator.vHave.empty()) { Branch (2587:17): [True: 0, False: 0]
|
2588 | | // It should be impossible for the getheaders request to fail, |
2589 | | // because we just cleared the last getheaders timestamp. |
2590 | 0 | bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer); |
2591 | 0 | Assume(sent_getheaders); |
2592 | 0 | LogDebug(BCLog::NET, "more getheaders (from %s) to peer=%d\n", |
2593 | 0 | locator.vHave.front().ToString(), pfrom.GetId()); |
2594 | 0 | } |
2595 | 0 | } |
2596 | |
|
2597 | 0 | if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) { Branch (2597:13): [True: 0, False: 0]
|
2598 | 0 | peer.m_headers_sync.reset(nullptr); |
2599 | | |
2600 | | // Delete this peer's entry in m_headers_presync_stats. |
2601 | | // If this is m_headers_presync_bestpeer, it will be replaced later |
2602 | | // by the next peer that triggers the else{} branch below. |
2603 | 0 | LOCK(m_headers_presync_mutex); |
2604 | 0 | m_headers_presync_stats.erase(pfrom.GetId()); |
2605 | 0 | } else { |
2606 | | // Build statistics for this peer's sync. |
2607 | 0 | HeadersPresyncStats stats; |
2608 | 0 | stats.first = peer.m_headers_sync->GetPresyncWork(); |
2609 | 0 | if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) { Branch (2609:17): [True: 0, False: 0]
|
2610 | 0 | stats.second = {peer.m_headers_sync->GetPresyncHeight(), |
2611 | 0 | peer.m_headers_sync->GetPresyncTime()}; |
2612 | 0 | } |
2613 | | |
2614 | | // Update statistics in stats. |
2615 | 0 | LOCK(m_headers_presync_mutex); |
2616 | 0 | m_headers_presync_stats[pfrom.GetId()] = stats; |
2617 | 0 | auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer); |
2618 | 0 | bool best_updated = false; |
2619 | 0 | if (best_it == m_headers_presync_stats.end()) { Branch (2619:17): [True: 0, False: 0]
|
2620 | | // If the cached best peer is outdated, iterate over all remaining ones (including |
2621 | | // newly updated one) to find the best one. |
2622 | 0 | NodeId peer_best{-1}; |
2623 | 0 | const HeadersPresyncStats* stat_best{nullptr}; |
2624 | 0 | for (const auto& [peer, stat] : m_headers_presync_stats) { Branch (2624:47): [True: 0, False: 0]
|
2625 | 0 | if (!stat_best || stat > *stat_best) { Branch (2625:25): [True: 0, False: 0]
Branch (2625:39): [True: 0, False: 0]
|
2626 | 0 | peer_best = peer; |
2627 | 0 | stat_best = &stat; |
2628 | 0 | } |
2629 | 0 | } |
2630 | 0 | m_headers_presync_bestpeer = peer_best; |
2631 | 0 | best_updated = (peer_best == pfrom.GetId()); |
2632 | 0 | } else if (best_it->first == pfrom.GetId() || stats > best_it->second) { Branch (2632:24): [True: 0, False: 0]
Branch (2632:59): [True: 0, False: 0]
|
2633 | | // pfrom was and remains the best peer, or pfrom just became best. |
2634 | 0 | m_headers_presync_bestpeer = pfrom.GetId(); |
2635 | 0 | best_updated = true; |
2636 | 0 | } |
2637 | 0 | if (best_updated && stats.second.has_value()) { Branch (2637:17): [True: 0, False: 0]
Branch (2637:33): [True: 0, False: 0]
|
2638 | | // If the best peer updated, and it is in its first phase, signal. |
2639 | 0 | m_headers_presync_should_signal = true; |
2640 | 0 | } |
2641 | 0 | } |
2642 | |
|
2643 | 0 | if (result.success) { Branch (2643:13): [True: 0, False: 0]
|
2644 | | // We only overwrite the headers passed in if processing was |
2645 | | // successful. |
2646 | 0 | headers.swap(result.pow_validated_headers); |
2647 | 0 | } |
2648 | |
|
2649 | 0 | return result.success; |
2650 | 0 | } |
2651 | | // Either we didn't have a sync in progress, or something went wrong |
2652 | | // processing these headers, or we are returning headers to the caller to |
2653 | | // process. |
2654 | 21.4k | return false; |
2655 | 21.4k | } |
2656 | | |
2657 | | bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers) |
2658 | 18.9k | { |
2659 | | // Calculate the claimed total work on this chain. |
2660 | 18.9k | arith_uint256 total_work = chain_start_header->nChainWork + CalculateClaimedHeadersWork(headers); |
2661 | | |
2662 | | // Our dynamic anti-DoS threshold (minimum work required on a headers chain |
2663 | | // before we'll store it) |
2664 | 18.9k | arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold(); |
2665 | | |
2666 | | // Avoid DoS via low-difficulty-headers by only processing if the headers |
2667 | | // are part of a chain with sufficient work. |
2668 | 18.9k | if (total_work < minimum_chain_work) { Branch (2668:9): [True: 0, False: 18.9k]
|
2669 | | // Only try to sync with this peer if their headers message was full; |
2670 | | // otherwise they don't have more headers after this so no point in |
2671 | | // trying to sync their too-little-work chain. |
2672 | 0 | if (headers.size() == m_opts.max_headers_result) { Branch (2672:13): [True: 0, False: 0]
|
2673 | | // Note: we could advance to the last header in this set that is |
2674 | | // known to us, rather than starting at the first header (which we |
2675 | | // may already have); however this is unlikely to matter much since |
2676 | | // ProcessHeadersMessage() already handles the case where all |
2677 | | // headers in a received message are already known and are |
2678 | | // ancestors of m_best_header or chainActive.Tip(), by skipping |
2679 | | // this logic in that case. So even if the first header in this set |
2680 | | // of headers is known, some header in this set must be new, so |
2681 | | // advancing to the first unknown header would have only a small effect. |
2682 | 0 | LOCK(peer.m_headers_sync_mutex); |
2683 | 0 | peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(), |
2684 | 0 | chain_start_header, minimum_chain_work)); |
2685 | | |
2686 | | // Now that a HeadersSyncState object for tracking this synchronization |
2687 | | // has been created, process the headers using it as normal. Failures are |
2688 | | // handled inside IsContinuationOfLowWorkHeadersSync. |
2689 | 0 | (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); |
2690 | 0 | } else { |
2691 | 0 | LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId()); |
2692 | 0 | } |
2693 | | |
2694 | | // The peer has not yet given us a chain that meets our work threshold, |
2695 | | // so we want to prevent further processing of the headers in any case. |
2696 | 0 | headers = {}; |
2697 | 0 | return true; |
2698 | 0 | } |
2699 | | |
2700 | 18.9k | return false; |
2701 | 18.9k | } |
2702 | | |
2703 | | bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) |
2704 | 20.6k | { |
2705 | 20.6k | if (header == nullptr) { Branch (2705:9): [True: 17.3k, False: 3.38k]
|
2706 | 17.3k | return false; |
2707 | 17.3k | } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) { Branch (2707:16): [True: 3.38k, False: 0]
Branch (2707:55): [True: 1.68k, False: 1.70k]
|
2708 | 1.68k | return true; |
2709 | 1.70k | } else if (m_chainman.ActiveChain().Contains(header)) { Branch (2709:16): [True: 60, False: 1.64k]
|
2710 | 60 | return true; |
2711 | 60 | } |
2712 | 1.64k | return false; |
2713 | 20.6k | } |
2714 | | |
2715 | | bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) |
2716 | 91.8k | { |
2717 | 91.8k | const auto current_time = NodeClock::now(); |
2718 | | |
2719 | | // Only allow a new getheaders message to go out if we don't have a recent |
2720 | | // one already in-flight |
2721 | 91.8k | if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) { Branch (2721:9): [True: 91.6k, False: 198]
|
2722 | 91.6k | MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256()); |
2723 | 91.6k | peer.m_last_getheaders_timestamp = current_time; |
2724 | 91.6k | return true; |
2725 | 91.6k | } |
2726 | 198 | return false; |
2727 | 91.8k | } |
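
The gate above allows one getheaders request per HEADERS_RESPONSE_TIME window, recording the send time so repeated triggers collapse into a single in-flight request. A sketch with an illustrative GetHeadersGate in place of the peer state:

    #include <chrono>
    #include <iostream>

    using namespace std::chrono_literals;

    constexpr auto RESPONSE_TIME = 2min; // mirrors HEADERS_RESPONSE_TIME

    struct GetHeadersGate {
        std::chrono::steady_clock::time_point last_sent{};
        bool TrySend(std::chrono::steady_clock::time_point now) {
            if (now - last_sent <= RESPONSE_TIME) return false; // one already in flight
            last_sent = now; // the real code pushes the GETHEADERS message here
            return true;
        }
    };

    int main()
    {
        GetHeadersGate gate;
        const auto t0 = std::chrono::steady_clock::now();
        std::cout << gate.TrySend(t0) << gate.TrySend(t0 + 1min)
                  << gate.TrySend(t0 + 3min) << '\n'; // prints 101
    }
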
2728 | | |
2729 | | /* |
2730 | | * Given a new headers tip ending in last_header, potentially request blocks towards that tip. |
2731 | | * We require that the given tip have at least as much work as our tip, and for |
2732 | | * our current tip to be "close to synced" (see CanDirectFetch()). |
2733 | | */ |
2734 | | void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header) |
2735 | 19.2k | { |
2736 | 19.2k | LOCK(cs_main); |
2737 | 19.2k | CNodeState *nodestate = State(pfrom.GetId()); |
2738 | | |
2739 | 19.2k | if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) { Branch (2739:9): [True: 7.15k, False: 12.0k]
Branch (2739:29): [True: 7.15k, False: 0]
Branch (2739:70): [True: 4.99k, False: 2.16k]
|
2740 | 4.99k | std::vector<const CBlockIndex*> vToFetch; |
2741 | 4.99k | const CBlockIndex* pindexWalk{&last_header}; |
2742 | | // Calculate all the blocks we'd need to switch to last_header, up to a limit. |
2743 | 14.5k | while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { Branch (2743:16): [True: 14.5k, False: 0]
Branch (2743:30): [True: 9.52k, False: 4.99k]
Branch (2743:80): [True: 9.52k, False: 0]
|
2744 | 9.52k | if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && Branch (2744:17): [True: 7.78k, False: 1.74k]
Branch (2744:17): [True: 4.98k, False: 4.54k]
|
2745 | 9.52k | !IsBlockRequested(pindexWalk->GetBlockHash()) && Branch (2745:21): [True: 4.98k, False: 2.79k]
|
2746 | 9.52k | (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) { Branch (2746:22): [True: 0, False: 4.98k]
Branch (2746:100): [True: 4.98k, False: 0]
|
2747 | | // We don't have this block, and it's not yet in flight. |
2748 | 4.98k | vToFetch.push_back(pindexWalk); |
2749 | 4.98k | } |
2750 | 9.52k | pindexWalk = pindexWalk->pprev; |
2751 | 9.52k | } |
2752 | | // If pindexWalk still isn't on our main chain, we're looking at a |
2753 | | // very large reorg at a time we think we're close to caught up to |
2754 | | // the main chain -- this shouldn't really happen. Bail out on the |
2755 | | // direct fetch and rely on parallel download instead. |
2756 | 4.99k | if (!m_chainman.ActiveChain().Contains(pindexWalk)) { Branch (2756:13): [True: 0, False: 4.99k]
|
2757 | 0 | LogDebug(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", |
2758 | 0 | last_header.GetBlockHash().ToString(), |
2759 | 0 | last_header.nHeight); |
2760 | 4.99k | } else { |
2761 | 4.99k | std::vector<CInv> vGetData; |
2762 | | // Download as much as possible, from earliest to latest. |
2763 | 4.99k | for (const CBlockIndex* pindex : vToFetch | std::views::reverse) { Branch (2763:44): [True: 4.81k, False: 4.83k]
|
2764 | 4.81k | if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { Branch (2764:21): [True: 152, False: 4.65k]
|
2765 | | // Can't download any more from this peer |
2766 | 152 | break; |
2767 | 152 | } |
2768 | 4.65k | uint32_t nFetchFlags = GetFetchFlags(peer); |
2769 | 4.65k | vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()); |
2770 | 4.65k | BlockRequested(pfrom.GetId(), *pindex); |
2771 | 4.65k | LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n", |
2772 | 4.65k | pindex->GetBlockHash().ToString(), pfrom.GetId()); |
2773 | 4.65k | } |
2774 | 4.99k | if (vGetData.size() > 1) { Branch (2774:17): [True: 194, False: 4.79k]
|
2775 | 194 | LogDebug(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n", |
2776 | 194 | last_header.GetBlockHash().ToString(), |
2777 | 194 | last_header.nHeight); |
2778 | 194 | } |
2779 | 4.99k | if (vGetData.size() > 0) { Branch (2779:17): [True: 4.38k, False: 610]
|
2780 | 4.38k | if (!m_opts.ignore_incoming_txs && Branch (2780:21): [True: 4.38k, False: 0]
|
2781 | 4.38k | nodestate->m_provides_cmpctblocks && Branch (2781:25): [True: 4.38k, False: 0]
|
2782 | 4.38k | vGetData.size() == 1 && Branch (2782:25): [True: 4.18k, False: 194]
|
2783 | 4.38k | mapBlocksInFlight.size() == 1 && Branch (2783:25): [True: 2.33k, False: 1.85k]
|
2784 | 4.38k | last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) { Branch (2784:25): [True: 1.98k, False: 349]
|
2785 | | // In any case, we want to download using a compact block, not a regular one |
2786 | 1.98k | vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); |
2787 | 1.98k | } |
2788 | 4.38k | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData); |
2789 | 4.38k | } |
2790 | 4.99k | } |
2791 | 4.99k | } |
2792 | 19.2k | } |
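
The direct-fetch loop above collects missing blocks newest-first by walking pprev, then requests them earliest-first via std::views::reverse. A self-contained sketch of that ordering with a hypothetical block type (omitting the active-chain, in-flight, and segwit-service checks of the real code):

    #include <cstddef>
    #include <ranges>
    #include <vector>

    struct BlockLite { int height; const BlockLite* prev; bool have_data; };

    // Collect up to `limit` missing ancestors of `tip` (newest first), then
    // return them in download order (earliest first).
    std::vector<const BlockLite*> DirectFetchOrder(const BlockLite* tip, std::size_t limit)
    {
        std::vector<const BlockLite*> newest_first;
        for (const BlockLite* walk = tip; walk && newest_first.size() < limit; walk = walk->prev) {
            if (!walk->have_data) newest_first.push_back(walk);
        }
        std::vector<const BlockLite*> earliest_first;
        for (const BlockLite* b : newest_first | std::views::reverse) {
            earliest_first.push_back(b);
        }
        return earliest_first;
    }
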
2793 | | |
2794 | | /** |
2795 | | * Given receipt of headers from a peer ending in last_header, along with |
2796 | | * whether that header was new and whether the headers message was full, |
2797 | | * update the state we keep for the peer. |
2798 | | */ |
2799 | | void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, |
2800 | | const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers) |
2801 | 19.2k | { |
2802 | 19.2k | LOCK(cs_main); |
2803 | 19.2k | CNodeState *nodestate = State(pfrom.GetId()); |
2804 | | |
2805 | 19.2k | UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash()); |
2806 | | |
2807 | | // From here, pindexBestKnownBlock should be guaranteed to be non-null, |
2808 | | // because it is set in UpdateBlockAvailability. Some nullptr checks |
2809 | | // are still present, however, as belt-and-suspenders. |
2810 | | |
2811 | 19.2k | if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) { Branch (2811:9): [True: 15.8k, False: 3.37k]
Branch (2811:32): [True: 7.94k, False: 7.90k]
|
2812 | 7.94k | nodestate->m_last_block_announcement = GetTime(); |
2813 | 7.94k | } |
2814 | | |
2815 | | // If we're in IBD, we want outbound peers that will serve us a useful |
2816 | | // chain. Disconnect peers that are on chains with insufficient work. |
2817 | 19.2k | if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) { Branch (2817:9): [True: 0, False: 19.2k]
Branch (2817:48): [True: 0, False: 0]
|
2818 | | // If the peer has no more headers to give us, then we know we have |
2819 | | // their tip. |
2820 | 0 | if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) { Branch (2820:13): [True: 0, False: 0]
Branch (2820:48): [True: 0, False: 0]
|
2821 | | // This peer has too little work on their headers chain to help |
2822 | | // us sync -- disconnect if it is an outbound disconnection |
2823 | | // candidate. |
2824 | | // Note: We compare their tip to the minimum chain work (rather than |
2825 | | // m_chainman.ActiveChain().Tip()) because we won't start block download |
2826 | | // until we have a headers chain that has at least |
2827 | | // the minimum chain work, even if a peer has a chain past our tip, |
2828 | | // as an anti-DoS measure. |
2829 | 0 | if (pfrom.IsOutboundOrBlockRelayConn()) { Branch (2829:17): [True: 0, False: 0]
|
2830 | 0 | LogInfo("outbound peer headers chain has insufficient work, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
2831 | 0 | pfrom.fDisconnect = true; |
2832 | 0 | } |
2833 | 0 | } |
2834 | 0 | } |
2835 | | |
2836 | | // If this is an outbound full-relay peer, check to see if we should protect |
2837 | | // it from the bad/lagging chain logic. |
2838 | | // Note that outbound block-relay peers are excluded from this protection, and |
2839 | | // thus always subject to eviction under the bad/lagging chain logic. |
2840 | | // See ChainSyncTimeoutState. |
2841 | 19.2k | if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) { Branch (2841:9): [True: 19.2k, False: 0]
Branch (2841:31): [True: 8.71k, False: 10.5k]
Branch (2841:61): [True: 8.71k, False: 0]
|
2842 | 8.71k | if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { Branch (2842:13): [True: 8.66k, False: 55]
Branch (2842:110): [True: 7.57k, False: 1.09k]
Branch (2842:203): [True: 2.95k, False: 4.62k]
|
2843 | 2.95k | LogDebug(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId()); |
2844 | 2.95k | nodestate->m_chain_sync.m_protect = true; |
2845 | 2.95k | ++m_outbound_peers_with_protect_from_disconnect; |
2846 | 2.95k | } |
2847 | 8.71k | } |
2848 | 19.2k | } |
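
The protection logic above is a capped counter: once a full outbound peer proves knowledge of a chain with at least as much work as our tip, it is marked protected, up to a global limit. A sketch with hypothetical names (the cap value here is illustrative, standing in for MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT):

    constexpr int kMaxProtected = 4; // illustrative cap
    int g_protected_peers = 0;       // node-wide counter

    struct PeerStateLite { bool protect = false; };

    void MaybeProtectSketch(PeerStateLite& peer, bool peer_chain_at_least_our_work)
    {
        if (peer.protect) return;                       // already protected
        if (!peer_chain_at_least_our_work) return;      // not yet useful to us
        if (g_protected_peers >= kMaxProtected) return; // cap reached
        peer.protect = true;
        ++g_protected_peers;
    }
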
2849 | | |
2850 | | void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer, |
2851 | | std::vector<CBlockHeader>&& headers, |
2852 | | bool via_compact_block) |
2853 | 21.4k | { |
2854 | 21.4k | size_t nCount = headers.size(); |
2855 | | |
2856 | 21.4k | if (nCount == 0) { Branch (2856:9): [True: 22, False: 21.4k]
|
2857 | | // Nothing interesting. Stop asking this peer for more headers.
2858 | | // If we were in the middle of headers sync, receiving an empty headers |
2859 | | // message suggests that the peer suddenly has nothing to give us |
2860 | | // (perhaps it reorged to our chain). Clear download state for this peer. |
2861 | 22 | LOCK(peer.m_headers_sync_mutex); |
2862 | 22 | if (peer.m_headers_sync) { Branch (2862:13): [True: 0, False: 22]
|
2863 | 0 | peer.m_headers_sync.reset(nullptr); |
2864 | 0 | LOCK(m_headers_presync_mutex); |
2865 | 0 | m_headers_presync_stats.erase(pfrom.GetId()); |
2866 | 0 | } |
2867 | | // A headers message with no headers cannot be an announcement, so assume |
2868 | | // it is a response to our last getheaders request, if there is one. |
2869 | 22 | peer.m_last_getheaders_timestamp = {}; |
2870 | 22 | return; |
2871 | 22 | } |
2872 | | |
2873 | | // Before we do any processing, make sure these pass basic sanity checks. |
2874 | | // We'll rely on headers having valid proof-of-work further down, as an |
2875 | | // anti-DoS criteria (note: this check is required before passing any |
2876 | | // headers into HeadersSyncState). |
2877 | 21.4k | if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) { Branch (2877:9): [True: 24, False: 21.4k]
|
2878 | | // Misbehaving() calls are handled within CheckHeadersPoW(), so we can |
2879 | | // just return. (Note that even if a header is announced via compact |
2880 | | // block, the header itself should be valid, so this type of error can |
2881 | | // always be punished.) |
2882 | 24 | return; |
2883 | 24 | } |
2884 | | |
2885 | 21.4k | const CBlockIndex *pindexLast = nullptr; |
2886 | | |
2887 | | // We'll set already_validated_work to true if these headers are |
2888 | | // successfully processed as part of a low-work headers sync in progress |
2889 | | // (either in PRESYNC or REDOWNLOAD phase). |
2890 | | // If true, this will mean that any headers returned to us (ie during |
2891 | | // REDOWNLOAD) can be validated without further anti-DoS checks. |
2892 | 21.4k | bool already_validated_work = false; |
2893 | | |
2894 | | // If we're in the middle of headers sync, let it do its magic. |
2895 | 21.4k | bool have_headers_sync = false; |
2896 | 21.4k | { |
2897 | 21.4k | LOCK(peer.m_headers_sync_mutex); |
2898 | | |
2899 | 21.4k | already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); |
2900 | | |
2901 | | // The headers we passed in may have been: |
2902 | | // - untouched, perhaps if no headers-sync was in progress, or some |
2903 | | // failure occurred |
2904 | | // - erased, such as if the headers were successfully processed and no |
2905 | | // additional headers processing needs to take place (such as if we |
2906 | | // are still in PRESYNC) |
2907 | | // - replaced with headers that are now ready for validation, such as |
2908 | | // during the REDOWNLOAD phase of a low-work headers sync. |
2909 | | // So just check whether we still have headers that we need to process, |
2910 | | // or not. |
2911 | 21.4k | if (headers.empty()) { Branch (2911:13): [True: 0, False: 21.4k]
|
2912 | 0 | return; |
2913 | 0 | } |
2914 | | |
2915 | 21.4k | have_headers_sync = !!peer.m_headers_sync; |
2916 | 21.4k | } |
2917 | | |
2918 | | // Do these headers connect to something in our block index? |
2919 | 21.4k | const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))}; |
2920 | 21.4k | bool headers_connect_blockindex{chain_start_header != nullptr}; |
2921 | | |
2922 | 21.4k | if (!headers_connect_blockindex) { Branch (2922:9): [True: 707, False: 20.6k]
|
2923 | | // This could be a BIP 130 block announcement; use
2924 | | // special logic for handling headers that don't connect, as this |
2925 | | // could be benign. |
2926 | 707 | HandleUnconnectingHeaders(pfrom, peer, headers); |
2927 | 707 | return; |
2928 | 707 | } |
2929 | | |
2930 | | // If headers connect, assume that this is in response to any outstanding getheaders |
2931 | | // request we may have sent, and clear out the time of our last request. Non-connecting |
2932 | | // headers cannot be a response to a getheaders request. |
2933 | 20.6k | peer.m_last_getheaders_timestamp = {}; |
2934 | | |
2935 | | // If the headers we received are already in memory and an ancestor of |
2936 | | // m_best_header or our tip, skip anti-DoS checks. These headers will not |
2937 | | // use any more memory (and we are not leaking information that could be |
2938 | | // used to fingerprint us). |
2939 | 20.6k | const CBlockIndex *last_received_header{nullptr}; |
2940 | 20.6k | { |
2941 | 20.6k | LOCK(cs_main); |
2942 | 20.6k | last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()); |
2943 | 20.6k | if (IsAncestorOfBestHeaderOrTip(last_received_header)) { Branch (2943:13): [True: 1.74k, False: 18.9k]
|
2944 | 1.74k | already_validated_work = true; |
2945 | 1.74k | } |
2946 | 20.6k | } |
2947 | | |
2948 | | // If our peer has NetPermissionFlags::NoBan privileges, then bypass our |
2949 | | // anti-DoS logic (this saves bandwidth when we connect to a trusted peer |
2950 | | // on startup). |
2951 | 20.6k | if (pfrom.HasPermission(NetPermissionFlags::NoBan)) { Branch (2951:9): [True: 0, False: 20.6k]
|
2952 | 0 | already_validated_work = true; |
2953 | 0 | } |
2954 | | |
2955 | | // At this point, the headers connect to something in our block index. |
2956 | | // Do anti-DoS checks to determine if we should process or store for later |
2957 | | // processing. |
2958 | 20.6k | if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom, Branch (2958:9): [True: 18.9k, False: 1.74k]
Branch (2958:36): [True: 0, False: 18.9k]
|
2959 | 18.9k | chain_start_header, headers)) { |
2960 | | // If we successfully started a low-work headers sync, then there |
2961 | | // should be no headers to process any further. |
2962 | 0 | Assume(headers.empty()); |
2963 | 0 | return; |
2964 | 0 | } |
2965 | | |
2966 | | // At this point, we have a set of headers with sufficient work on them |
2967 | | // which can be processed. |
2968 | | |
2969 | | // If we don't have the last header, then this peer will have given us |
2970 | | // something new (if these headers are valid). |
2971 | 20.6k | bool received_new_header{last_received_header == nullptr}; |
2972 | | |
2973 | | // Now process all the headers. |
2974 | 20.6k | BlockValidationState state; |
2975 | 20.6k | const bool processed{m_chainman.ProcessNewBlockHeaders(headers, |
2976 | 20.6k | /*min_pow_checked=*/true, |
2977 | 20.6k | state, &pindexLast)}; |
2978 | 20.6k | if (!processed) { Branch (2978:9): [True: 1.46k, False: 19.2k]
|
2979 | 1.46k | if (state.IsInvalid()) { Branch (2979:13): [True: 1.46k, False: 0]
|
2980 | 1.46k | MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received"); |
2981 | 1.46k | return; |
2982 | 1.46k | } |
2983 | 1.46k | } |
2984 | 20.6k | assert(pindexLast); Branch (2984:5): [True: 19.2k, False: 0]
|
2985 | | |
2986 | 19.2k | if (processed && received_new_header) { Branch (2986:9): [True: 19.2k, False: 0]
Branch (2986:22): [True: 15.8k, False: 3.37k]
|
2987 | 15.8k | LogBlockHeader(*pindexLast, pfrom, /*via_compact_block=*/false); |
2988 | 15.8k | } |
2989 | | |
2990 | | // Consider fetching more headers if we are not using our headers-sync mechanism. |
2991 | 19.2k | if (nCount == m_opts.max_headers_result && !have_headers_sync) { Branch (2991:9): [True: 0, False: 19.2k]
Branch (2991:48): [True: 0, False: 0]
|
2992 | | // Headers message had its maximum size; the peer may have more headers. |
2993 | 0 | if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) { Branch (2993:13): [True: 0, False: 0]
|
2994 | 0 | LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", |
2995 | 0 | pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height); |
2996 | 0 | } |
2997 | 0 | } |
2998 | | |
2999 | 19.2k | UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result); |
3000 | | |
3001 | | // Consider immediately downloading blocks. |
3002 | 19.2k | HeadersDirectFetchBlocks(pfrom, peer, *pindexLast); |
3003 | | |
3004 | 19.2k | return; |
3005 | 19.2k | } |
3006 | | |
3007 | | std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state, |
3008 | | bool first_time_failure) |
3009 | 353k | { |
3010 | 353k | AssertLockNotHeld(m_peer_mutex); |
3011 | 353k | AssertLockHeld(g_msgproc_mutex); |
3012 | 353k | AssertLockHeld(m_tx_download_mutex); |
3013 | | |
3014 | 353k | PeerRef peer{GetPeerRef(nodeid)}; |
3015 | | |
3016 | 353k | LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n", |
3017 | 353k | ptx->GetHash().ToString(), |
3018 | 353k | ptx->GetWitnessHash().ToString(), |
3019 | 353k | nodeid, |
3020 | 353k | state.ToString()); |
3021 | | |
3022 | 353k | const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure); |
3023 | | |
3024 | 353k | if (add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) { Branch (3024:9): [True: 342k, False: 10.3k]
Branch (3024:33): [True: 341k, False: 1.46k]
|
3025 | 341k | AddToCompactExtraTransactions(ptx); |
3026 | 341k | } |
3027 | 353k | for (const Txid& parent_txid : unique_parents) { Branch (3027:34): [True: 302k, False: 353k]
|
3028 | 302k | if (peer) AddKnownTx(*peer, parent_txid); Branch (3028:13): [True: 302k, False: 0]
|
3029 | 302k | } |
3030 | | |
3031 | 353k | MaybePunishNodeForTx(nodeid, state); |
3032 | | |
3033 | 353k | return package_to_validate; |
3034 | 353k | } |
3035 | | |
3036 | | void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions) |
3037 | 38.1k | { |
3038 | 38.1k | AssertLockNotHeld(m_peer_mutex); |
3039 | 38.1k | AssertLockHeld(g_msgproc_mutex); |
3040 | 38.1k | AssertLockHeld(m_tx_download_mutex); |
3041 | | |
3042 | 38.1k | m_txdownloadman.MempoolAcceptedTx(tx); |
3043 | | |
3044 | 38.1k | LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n", |
3045 | 38.1k | nodeid, |
3046 | 38.1k | tx->GetHash().ToString(), |
3047 | 38.1k | tx->GetWitnessHash().ToString(), |
3048 | 38.1k | m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000); |
3049 | | |
3050 | 38.1k | RelayTransaction(tx->GetHash(), tx->GetWitnessHash()); |
3051 | | |
3052 | 38.1k | for (const CTransactionRef& removedTx : replaced_transactions) { Branch (3052:43): [True: 4.90k, False: 38.1k]
|
3053 | 4.90k | AddToCompactExtraTransactions(removedTx); |
3054 | 4.90k | } |
3055 | 38.1k | } |
3056 | | |
3057 | | void PeerManagerImpl::ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) |
3058 | 2.53k | { |
3059 | 2.53k | AssertLockNotHeld(m_peer_mutex); |
3060 | 2.53k | AssertLockHeld(g_msgproc_mutex); |
3061 | 2.53k | AssertLockHeld(m_tx_download_mutex); |
3062 | | |
3063 | 2.53k | const auto& package = package_to_validate.m_txns; |
3064 | 2.53k | const auto& senders = package_to_validate.m_senders; |
3065 | | |
3066 | 2.53k | if (package_result.m_state.IsInvalid()) { Branch (3066:9): [True: 2.45k, False: 82]
|
3067 | 2.45k | m_txdownloadman.MempoolRejectedPackage(package); |
3068 | 2.45k | } |
3069 | | // We currently only expect to process 1-parent-1-child packages. Remove if this changes. |
3070 | 2.53k | if (!Assume(package.size() == 2)) return; Branch (3070:9): [True: 0, False: 2.53k]
|
3071 | | |
3072 | | // Iterate backwards to erase in-package descendants from the orphanage before they become |
3073 | | // relevant in AddChildrenToWorkSet. |
3074 | 2.53k | auto package_iter = package.rbegin(); |
3075 | 2.53k | auto senders_iter = senders.rbegin(); |
3076 | 7.60k | while (package_iter != package.rend()) { Branch (3076:12): [True: 5.07k, False: 2.53k]
|
3077 | 5.07k | const auto& tx = *package_iter; |
3078 | 5.07k | const NodeId nodeid = *senders_iter; |
3079 | 5.07k | const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())}; |
3080 | | |
3081 | | // It is not guaranteed that a result exists for every transaction. |
3082 | 5.07k | if (it_result != package_result.m_tx_results.end()) { Branch (3082:13): [True: 3.95k, False: 1.11k]
|
3083 | 3.95k | const auto& tx_result = it_result->second; |
3084 | 3.95k | switch (tx_result.m_result_type) { Branch (3084:21): [True: 0, False: 3.95k]
|
3085 | 182 | case MempoolAcceptResult::ResultType::VALID: Branch (3085:17): [True: 182, False: 3.77k]
|
3086 | 182 | { |
3087 | 182 | ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions); |
3088 | 182 | break; |
3089 | 0 | } |
3090 | 3.77k | case MempoolAcceptResult::ResultType::INVALID: Branch (3090:17): [True: 3.77k, False: 182]
|
3091 | 3.77k | case MempoolAcceptResult::ResultType::DIFFERENT_WITNESS: Branch (3091:17): [True: 0, False: 3.95k]
|
3092 | 3.77k | { |
3093 | | // Don't add to vExtraTxnForCompact, as these transactions should have already been |
3094 | | // added there when added to the orphanage or rejected for TX_RECONSIDERABLE. |
3095 | | // This should be updated if package submission is ever used for transactions |
3096 | | // that haven't already been validated before. |
3097 | 3.77k | ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*first_time_failure=*/false); |
3098 | 3.77k | break; |
3099 | 3.77k | } |
3100 | 0 | case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: Branch (3100:17): [True: 0, False: 3.95k]
|
3101 | 0 | { |
3102 | | // AlreadyHaveTx() should be catching transactions that are already in mempool. |
3103 | 0 | Assume(false); |
3104 | 0 | break; |
3105 | 3.77k | } |
3106 | 3.95k | } |
3107 | 3.95k | } |
3108 | 5.07k | package_iter++; |
3109 | 5.07k | senders_iter++; |
3110 | 5.07k | } |
3111 | 2.53k | } |
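
The backwards walk above pairs each package transaction with its sender while visiting the child before its parent, so in-package descendants leave the orphanage before the parent's acceptance can schedule them again. A trivial sketch of iterating two parallel vectors in reverse lockstep (hypothetical element types):

    #include <cstdio>
    #include <vector>

    void VisitPackageReversed(const std::vector<const char*>& txns,
                              const std::vector<int>& senders)
    {
        auto tx_it = txns.rbegin();
        auto sender_it = senders.rbegin();
        while (tx_it != txns.rend()) {
            // For {parent, child}, the child is visited first.
            std::printf("tx %s from peer %d\n", *tx_it, *sender_it);
            ++tx_it;
            ++sender_it;
        }
    }
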
3112 | | |
3113 | | // NOTE: the orphan processing used to be uninterruptible and quadratic, which could allow a peer to stall the node for |
3114 | | // hours with specially crafted transactions. See https://bitcoincore.org/en/2024/07/03/disclose-orphan-dos. |
3115 | | bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) |
3116 | 42.4M | { |
3117 | 42.4M | AssertLockHeld(g_msgproc_mutex); |
3118 | 42.4M | LOCK2(::cs_main, m_tx_download_mutex); |
3119 | | |
3120 | 42.4M | CTransactionRef porphanTx = nullptr; |
3121 | | |
3122 | 42.4M | while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) { Branch (3122:28): [True: 9.41k, False: 42.4M]
|
3123 | 9.41k | const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx); |
3124 | 9.41k | const TxValidationState& state = result.m_state; |
3125 | 9.41k | const Txid& orphanHash = porphanTx->GetHash(); |
3126 | 9.41k | const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash(); |
3127 | | |
3128 | 9.41k | if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { Branch (3128:13): [True: 2.83k, False: 6.57k]
|
3129 | 2.83k | LogDebug(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString()); |
3130 | 2.83k | ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions); |
3131 | 2.83k | return true; |
3132 | 6.57k | } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) { Branch (3132:20): [True: 5.71k, False: 865]
|
3133 | 5.71k | LogDebug(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n", |
3134 | 5.71k | orphanHash.ToString(), |
3135 | 5.71k | orphan_wtxid.ToString(), |
3136 | 5.71k | peer.m_id, |
3137 | 5.71k | state.ToString()); |
3138 | | |
3139 | 5.71k | if (Assume(state.IsInvalid() && |
3140 | 5.71k | state.GetResult() != TxValidationResult::TX_UNKNOWN && |
3141 | 5.71k | state.GetResult() != TxValidationResult::TX_NO_MEMPOOL && |
3142 | 5.71k | state.GetResult() != TxValidationResult::TX_RESULT_UNSET)) { |
3143 | 5.71k | ProcessInvalidTx(peer.m_id, porphanTx, state, /*first_time_failure=*/false); |
3144 | 5.71k | } |
3145 | 5.71k | return true; |
3146 | 5.71k | } |
3147 | 9.41k | } |
3148 | | |
3149 | 42.4M | return false; |
3150 | 42.4M | } |
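
The shape of the loop above is what makes orphan processing interruptible: at most one accept/reject outcome is reported per call, after which control returns to the caller. A sketch of that one-item-per-call pattern with a hypothetical work queue:

    #include <queue>

    std::queue<int> g_work; // hypothetical per-peer reconsideration queue

    // Process until one item produces a reportable outcome, then yield, so
    // a single peer cannot monopolize the message-handling thread (see the
    // DoS disclosure note above).
    bool ProcessOneSketch()
    {
        while (!g_work.empty()) {
            const int item = g_work.front();
            g_work.pop();
            const bool outcome_interesting = (item % 2) == 0; // stand-in for validation
            if (outcome_interesting) return true; // caller will call again later
        }
        return false; // nothing left to reconsider
    }
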
3151 | | |
3152 | | bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer, |
3153 | | BlockFilterType filter_type, uint32_t start_height, |
3154 | | const uint256& stop_hash, uint32_t max_height_diff, |
3155 | | const CBlockIndex*& stop_index, |
3156 | | BlockFilterIndex*& filter_index) |
3157 | 503 | { |
3158 | 503 | const bool supported_filter_type = |
3159 | 503 | (filter_type == BlockFilterType::BASIC && Branch (3159:10): [True: 17, False: 486]
|
3160 | 503 | (peer.m_our_services & NODE_COMPACT_FILTERS)); Branch (3160:10): [True: 17, False: 0]
|
3161 | 503 | if (!supported_filter_type) { Branch (3161:9): [True: 486, False: 17]
|
3162 | 486 | LogDebug(BCLog::NET, "peer requested unsupported block filter type: %d, %s\n", |
3163 | 486 | static_cast<uint8_t>(filter_type), node.DisconnectMsg(fLogIPs)); |
3164 | 486 | node.fDisconnect = true; |
3165 | 486 | return false; |
3166 | 486 | } |
3167 | | |
3168 | 17 | { |
3169 | 17 | LOCK(cs_main); |
3170 | 17 | stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash); |
3171 | | |
3172 | | // Check that the stop block exists and the peer would be allowed to fetch it. |
3173 | 17 | if (!stop_index || !BlockRequestAllowed(stop_index)) { Branch (3173:13): [True: 17, False: 0]
Branch (3173:28): [True: 0, False: 0]
|
3174 | 17 | LogDebug(BCLog::NET, "peer requested invalid block hash: %s, %s\n", |
3175 | 17 | stop_hash.ToString(), node.DisconnectMsg(fLogIPs)); |
3176 | 17 | node.fDisconnect = true; |
3177 | 17 | return false; |
3178 | 17 | } |
3179 | 17 | } |
3180 | | |
3181 | 0 | uint32_t stop_height = stop_index->nHeight; |
3182 | 0 | if (start_height > stop_height) { Branch (3182:9): [True: 0, False: 0]
|
3183 | 0 | LogDebug(BCLog::NET, "peer sent invalid getcfilters/getcfheaders with " |
3184 | 0 | "start height %d and stop height %d, %s\n", |
3185 | 0 | start_height, stop_height, node.DisconnectMsg(fLogIPs)); |
3186 | 0 | node.fDisconnect = true; |
3187 | 0 | return false; |
3188 | 0 | } |
3189 | 0 | if (stop_height - start_height >= max_height_diff) { Branch (3189:9): [True: 0, False: 0]
|
3190 | 0 | LogDebug(BCLog::NET, "peer requested too many cfilters/cfheaders: %d / %d, %s\n", |
3191 | 0 | stop_height - start_height + 1, max_height_diff, node.DisconnectMsg(fLogIPs)); |
3192 | 0 | node.fDisconnect = true; |
3193 | 0 | return false; |
3194 | 0 | } |
3195 | | |
3196 | 0 | filter_index = GetBlockFilterIndex(filter_type); |
3197 | 0 | if (!filter_index) { Branch (3197:9): [True: 0, False: 0]
|
3198 | 0 | LogDebug(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type)); |
3199 | 0 | return false; |
3200 | 0 | } |
3201 | | |
3202 | 0 | return true; |
3203 | 0 | } |
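
The two height checks above bound a request to at most max_height_diff filters (MAX_GETCFILTERS_SIZE or MAX_GETCFHEADERS_SIZE at the call sites): a range of heights [start, stop] covers stop - start + 1 filters, and the code rejects when stop - start >= max_height_diff. A sketch of that arithmetic:

    #include <cstdint>

    bool FilterRangeAllowed(uint32_t start, uint32_t stop, uint32_t max_diff)
    {
        if (start > stop) return false;   // inverted range: disconnect-worthy
        return (stop - start) < max_diff; // i.e. filter count <= max_diff
    }
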
3204 | | |
3205 | | void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv) |
3206 | 327 | { |
3207 | 327 | uint8_t filter_type_ser; |
3208 | 327 | uint32_t start_height; |
3209 | 327 | uint256 stop_hash; |
3210 | | |
3211 | 327 | vRecv >> filter_type_ser >> start_height >> stop_hash; |
3212 | | |
3213 | 327 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3214 | | |
3215 | 327 | const CBlockIndex* stop_index; |
3216 | 327 | BlockFilterIndex* filter_index; |
3217 | 327 | if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, Branch (3217:9): [True: 286, False: 41]
|
3218 | 327 | MAX_GETCFILTERS_SIZE, stop_index, filter_index)) { |
3219 | 286 | return; |
3220 | 286 | } |
3221 | | |
3222 | 41 | std::vector<BlockFilter> filters; |
3223 | 41 | if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) { Branch (3223:9): [True: 0, False: 41]
|
3224 | 0 | LogDebug(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n", |
3225 | 0 | BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); |
3226 | 0 | return; |
3227 | 0 | } |
3228 | | |
3229 | 41 | for (const auto& filter : filters) { Branch (3229:29): [True: 0, False: 41]
|
3230 | 0 | MakeAndPushMessage(node, NetMsgType::CFILTER, filter); |
3231 | 0 | } |
3232 | 41 | } |
3233 | | |
3234 | | void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv) |
3235 | 165 | { |
3236 | 165 | uint8_t filter_type_ser; |
3237 | 165 | uint32_t start_height; |
3238 | 165 | uint256 stop_hash; |
3239 | | |
3240 | 165 | vRecv >> filter_type_ser >> start_height >> stop_hash; |
3241 | | |
3242 | 165 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3243 | | |
3244 | 165 | const CBlockIndex* stop_index; |
3245 | 165 | BlockFilterIndex* filter_index; |
3246 | 165 | if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, Branch (3246:9): [True: 130, False: 35]
|
3247 | 165 | MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) { |
3248 | 130 | return; |
3249 | 130 | } |
3250 | | |
3251 | 35 | uint256 prev_header; |
3252 | 35 | if (start_height > 0) { Branch (3252:9): [True: 0, False: 35]
|
3253 | 0 | const CBlockIndex* const prev_block = |
3254 | 0 | stop_index->GetAncestor(static_cast<int>(start_height - 1)); |
3255 | 0 | if (!filter_index->LookupFilterHeader(prev_block, prev_header)) { Branch (3255:13): [True: 0, False: 0]
|
3256 | 0 | LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n", |
3257 | 0 | BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString()); |
3258 | 0 | return; |
3259 | 0 | } |
3260 | 0 | } |
3261 | | |
3262 | 35 | std::vector<uint256> filter_hashes; |
3263 | 35 | if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) { Branch (3263:9): [True: 0, False: 35]
|
3264 | 0 | LogDebug(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n", |
3265 | 0 | BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); |
3266 | 0 | return; |
3267 | 0 | } |
3268 | | |
3269 | 35 | MakeAndPushMessage(node, NetMsgType::CFHEADERS, |
3270 | 35 | filter_type_ser, |
3271 | 35 | stop_index->GetBlockHash(), |
3272 | 35 | prev_header, |
3273 | 35 | filter_hashes); |
3274 | 35 | } |
3275 | | |
3276 | | void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv) |
3277 | 117 | { |
3278 | 117 | uint8_t filter_type_ser; |
3279 | 117 | uint256 stop_hash; |
3280 | | |
3281 | 117 | vRecv >> filter_type_ser >> stop_hash; |
3282 | | |
3283 | 117 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3284 | | |
3285 | 117 | const CBlockIndex* stop_index; |
3286 | 117 | BlockFilterIndex* filter_index; |
3287 | 117 | if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash, Branch (3287:9): [True: 87, False: 30]
|
3288 | 117 | /*max_height_diff=*/std::numeric_limits<uint32_t>::max(), |
3289 | 117 | stop_index, filter_index)) { |
3290 | 87 | return; |
3291 | 87 | } |
3292 | | |
3293 | 30 | std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL); |
3294 | | |
3295 | | // Populate headers. |
3296 | 30 | const CBlockIndex* block_index = stop_index; |
3297 | 30 | for (int i = headers.size() - 1; i >= 0; i--) { Branch (3297:38): [True: 0, False: 30]
|
3298 | 0 | int height = (i + 1) * CFCHECKPT_INTERVAL; |
3299 | 0 | block_index = block_index->GetAncestor(height); |
3300 | |
3301 | 0 | if (!filter_index->LookupFilterHeader(block_index, headers[i])) { Branch (3301:13): [True: 0, False: 0]
|
3302 | 0 | LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n", |
3303 | 0 | BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString()); |
3304 | 0 | return; |
3305 | 0 | } |
3306 | 0 | } |
3307 | | |
3308 | 30 | MakeAndPushMessage(node, NetMsgType::CFCHECKPT, |
3309 | 30 | filter_type_ser, |
3310 | 30 | stop_index->GetBlockHash(), |
3311 | 30 | headers); |
3312 | 30 | } |
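
A worked example of the checkpoint layout above: with CFCHECKPT_INTERVAL = 1000 (the BIP157 spacing) and a stop block at height 2500, headers.size() is 2500 / 1000 = 2, and entry i holds the filter header at height (i + 1) * 1000, i.e. heights 1000 and 2000:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main()
    {
        const int interval = 1000;    // BIP157 checkpoint spacing
        const int stop_height = 2500; // example stop block
        std::vector<int> heights(stop_height / interval); // 2 entries
        for (std::size_t i = 0; i < heights.size(); ++i) {
            heights[i] = static_cast<int>(i + 1) * interval; // 1000, 2000
            std::printf("checkpoint %zu -> height %d\n", i, heights[i]);
        }
        return 0;
    }
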
3313 | | |
3314 | | void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked) |
3315 | 2.24M | { |
3316 | 2.24M | bool new_block{false}; |
3317 | 2.24M | m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block); |
3318 | 2.24M | if (new_block) { Branch (3318:9): [True: 2.23M, False: 14.2k]
|
3319 | 2.23M | node.m_last_block_time = GetTime<std::chrono::seconds>(); |
3320 | | // In case this block came from a different peer than we requested |
3321 | | // from, we can erase the block request now anyway (as we just stored |
3322 | | // this block to disk). |
3323 | 2.23M | LOCK(cs_main); |
3324 | 2.23M | RemoveBlockRequest(block->GetHash(), std::nullopt); |
3325 | 2.23M | } else { |
3326 | 14.2k | LOCK(cs_main); |
3327 | 14.2k | mapBlockSource.erase(block->GetHash()); |
3328 | 14.2k | } |
3329 | 2.24M | } |
3330 | | |
3331 | | void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions) |
3332 | 16 | { |
3333 | 16 | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
3334 | 16 | bool fBlockRead{false}; |
3335 | 16 | { |
3336 | 16 | LOCK(cs_main); |
3337 | | |
3338 | 16 | auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash); |
3339 | 16 | size_t already_in_flight = std::distance(range_flight.first, range_flight.second); |
3340 | 16 | bool requested_block_from_this_peer{false}; |
3341 | | |
3342 | | // Multimap ensures ordering of outstanding requests. It's either empty or first in line. |
3343 | 16 | bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId()); Branch (3343:32): [True: 16, False: 0]
Branch (3343:58): [True: 0, False: 0]
|
3344 | | |
3345 | 16 | while (range_flight.first != range_flight.second) { Branch (3345:16): [True: 0, False: 16]
|
3346 | 0 | auto [node_id, block_it] = range_flight.first->second; |
3347 | 0 | if (node_id == pfrom.GetId() && block_it->partialBlock) { Branch (3347:17): [True: 0, False: 0]
Branch (3347:45): [True: 0, False: 0]
|
3348 | 0 | requested_block_from_this_peer = true; |
3349 | 0 | break; |
3350 | 0 | } |
3351 | 0 | range_flight.first++; |
3352 | 0 | } |
3353 | | |
3354 | 16 | if (!requested_block_from_this_peer) { Branch (3354:13): [True: 16, False: 0]
|
3355 | 16 | LogDebug(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId()); |
3356 | 16 | return; |
3357 | 16 | } |
3358 | | |
3359 | 0 | PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; |
3360 | 0 | ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn); |
3361 | 0 | if (status == READ_STATUS_INVALID) { Branch (3361:13): [True: 0, False: 0]
|
3362 | 0 | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect |
3363 | 0 | Misbehaving(peer, "invalid compact block/non-matching block transactions"); |
3364 | 0 | return; |
3365 | 0 | } else if (status == READ_STATUS_FAILED) { Branch (3365:20): [True: 0, False: 0]
|
3366 | 0 | if (first_in_flight) { Branch (3366:17): [True: 0, False: 0]
|
3367 | | // Might have collided, fall back to getdata now :( |
3368 | 0 | std::vector<CInv> invs; |
3369 | 0 | invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash); |
3370 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs); |
3371 | 0 | } else { |
3372 | 0 | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); |
3373 | 0 | LogDebug(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId()); |
3374 | 0 | return; |
3375 | 0 | } |
3376 | 0 | } else { |
3377 | | // Block is either okay, or possibly we received |
3378 | | // READ_STATUS_CHECKBLOCK_FAILED. |
3379 | | // Note that CheckBlock can only fail for one of a few reasons: |
3380 | | // 1. bad-proof-of-work (impossible here, because we've already |
3381 | | // accepted the header) |
3382 | | // 2. merkleroot doesn't match the transactions given (already |
3383 | | // caught in FillBlock with READ_STATUS_FAILED, so |
3384 | | // impossible here) |
3385 | | // 3. the block is otherwise invalid (eg invalid coinbase, |
3386 | | // block is too big, too many legacy sigops, etc). |
3387 | | // So if CheckBlock failed, #3 is the only possibility. |
3388 | | // Under BIP 152, we don't discourage the peer unless proof of work is |
3389 | | // invalid (we don't require all the stateless checks to have |
3390 | | // been run). This is handled below, so just treat this as |
3391 | | // though the block was successfully read, and rely on the |
3392 | | // handling in ProcessNewBlock to ensure the block index is |
3393 | | // updated, etc. |
3394 | 0 | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer |
3395 | 0 | fBlockRead = true; |
3396 | | // mapBlockSource is used for potentially punishing peers and |
3397 | | // updating which peers send us compact blocks, so the race |
3398 | | // between here and cs_main in ProcessNewBlock is fine. |
3399 | | // BIP 152 permits peers to relay compact blocks after validating |
3400 | | // the header only; we should not punish peers if the block turns |
3401 | | // out to be invalid. |
3402 | 0 | mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false)); |
3403 | 0 | } |
3404 | 0 | } // Don't hold cs_main when we call into ProcessNewBlock |
3405 | 0 | if (fBlockRead) { Branch (3405:9): [True: 0, False: 0]
|
3406 | | // Since we requested this block (it was in mapBlocksInFlight), force it to be processed, |
3407 | | // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc) |
3408 | | // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent |
3409 | | // disk-space attacks), but this should be safe due to the |
3410 | | // protections in the compact block handler -- see related comment |
3411 | | // in compact block optimistic reconstruction handling. |
3412 | 0 | ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); |
3413 | 0 | } |
3414 | 0 | return; |
3415 | 0 | } |
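
The first_in_flight test above exploits a property of std::multimap: entries with equivalent keys keep insertion order, so the first element of equal_range is the oldest outstanding request for that block. A sketch with simplified types (a 64-bit stand-in for the uint256 block hash):

    #include <cstdint>
    #include <map>

    using NodeId = int64_t;
    using BlockHash = uint64_t; // stand-in for uint256

    bool FirstInFlightSketch(const std::multimap<BlockHash, NodeId>& in_flight,
                             BlockHash hash, NodeId peer)
    {
        const auto [begin, end] = in_flight.equal_range(hash);
        return begin == end || begin->second == peer;
    }
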
3416 | | |
3417 | 15.8k | void PeerManagerImpl::LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block) { |
3418 | | // To prevent log spam, this function should only be called after it was determined that a |
3419 | | // header is both new and valid. |
3420 | | // |
3421 | | // These messages are valuable for detecting potential selfish mining behavior; |
3422 | | // if multiple displacing headers are seen near simultaneously across many |
3423 | | // nodes in the network, this might be an indication of selfish mining. |
3424 | | // In addition, it can be used to identify peers which send us a header but
3425 | | // don't follow up with a complete and valid (compact) block.
3426 | | // Having this log by default when not in IBD ensures broad availability of |
3427 | | // this data in case investigation is merited. |
3428 | 15.8k | const auto msg = strprintf( |
3429 | 15.8k | "Saw new %sheader hash=%s height=%d peer=%d%s", |
3430 | 15.8k | via_compact_block ? "cmpctblock " : "", Branch (3430:9): [True: 0, False: 15.8k]
|
3431 | 15.8k | index.GetBlockHash().ToString(), |
3432 | 15.8k | index.nHeight, |
3433 | 15.8k | peer.GetId(), |
3434 | 15.8k | peer.LogIP(fLogIPs) |
3435 | 15.8k | ); |
3436 | 15.8k | if (m_chainman.IsInitialBlockDownload()) { Branch (3436:9): [True: 0, False: 15.8k]
|
3437 | 0 | LogDebug(BCLog::VALIDATION, "%s", msg); |
3438 | 15.8k | } else { |
3439 | 15.8k | LogInfo("%s", msg); |
3440 | 15.8k | } |
3441 | 15.8k | } |
3442 | | |
3443 | | void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv, |
3444 | | const std::chrono::microseconds time_received, |
3445 | | const std::atomic<bool>& interruptMsgProc) |
3446 | 5.51M | { |
3447 | 5.51M | AssertLockHeld(g_msgproc_mutex); |
3448 | | |
3449 | 5.51M | LogDebug(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId()); |
3450 | | |
3451 | 5.51M | PeerRef peer = GetPeerRef(pfrom.GetId()); |
3452 | 5.51M | if (peer == nullptr) return; Branch (3452:9): [True: 0, False: 5.51M]
|
3453 | | |
3454 | 5.51M | if (msg_type == NetMsgType::VERSION) { Branch (3454:9): [True: 88.9k, False: 5.42M]
|
3455 | 88.9k | if (pfrom.nVersion != 0) { Branch (3455:13): [True: 204, False: 88.7k]
|
3456 | 204 | LogDebug(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId()); |
3457 | 204 | return; |
3458 | 204 | } |
3459 | | |
3460 | 88.7k | int64_t nTime; |
3461 | 88.7k | CService addrMe; |
3462 | 88.7k | uint64_t nNonce = 1; |
3463 | 88.7k | ServiceFlags nServices; |
3464 | 88.7k | int nVersion; |
3465 | 88.7k | std::string cleanSubVer; |
3466 | 88.7k | int starting_height = -1; |
3467 | 88.7k | bool fRelay = true; |
3468 | | |
3469 | 88.7k | vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime; |
3470 | 88.7k | if (nTime < 0) { Branch (3470:13): [True: 0, False: 88.7k]
|
3471 | 0 | nTime = 0; |
3472 | 0 | } |
3473 | 88.7k | vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer |
3474 | 88.7k | vRecv >> CNetAddr::V1(addrMe); |
3475 | 88.7k | if (!pfrom.IsInboundConn()) Branch (3475:13): [True: 44.3k, False: 44.3k]
|
3476 | 44.3k | { |
3477 | | // Overwrites potentially existing services. In contrast to this, |
3478 | | // unvalidated services received via gossip relay in ADDR/ADDRV2 |
3479 | | // messages are only ever added but cannot replace existing ones. |
3480 | 44.3k | m_addrman.SetServices(pfrom.addr, nServices); |
3481 | 44.3k | } |
3482 | 88.7k | if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices)) Branch (3482:13): [True: 44.3k, False: 44.3k]
Branch (3482:47): [True: 0, False: 44.3k]
|
3483 | 0 | { |
3484 | 0 | LogDebug(BCLog::NET, "peer does not offer the expected services (%08x offered, %08x expected), %s\n", |
3485 | 0 | nServices, |
3486 | 0 | GetDesirableServiceFlags(nServices), |
3487 | 0 | pfrom.DisconnectMsg(fLogIPs)); |
3488 | 0 | pfrom.fDisconnect = true; |
3489 | 0 | return; |
3490 | 0 | } |
3491 | | |
3492 | 88.7k | if (nVersion < MIN_PEER_PROTO_VERSION) { Branch (3492:13): [True: 0, False: 88.7k]
|
3493 | | // disconnect from peers older than this proto version |
3494 | 0 | LogDebug(BCLog::NET, "peer using obsolete version %i, %s\n", nVersion, pfrom.DisconnectMsg(fLogIPs)); |
3495 | 0 | pfrom.fDisconnect = true; |
3496 | 0 | return; |
3497 | 0 | } |
3498 | | |
3499 | 88.7k | if (!vRecv.empty()) { Branch (3499:13): [True: 88.7k, False: 0]
|
3500 | | // The version message includes information about the sending node which we don't use: |
3501 | | // - 8 bytes (service bits) |
3502 | | // - 16 bytes (ipv6 address) |
3503 | | // - 2 bytes (port) |
3504 | 88.7k | vRecv.ignore(26); |
3505 | 88.7k | vRecv >> nNonce; |
3506 | 88.7k | } |
3507 | 88.7k | if (!vRecv.empty()) { Branch (3507:13): [True: 88.7k, False: 0]
|
3508 | 88.7k | std::string strSubVer; |
3509 | 88.7k | vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH); |
3510 | 88.7k | cleanSubVer = SanitizeString(strSubVer); |
3511 | 88.7k | } |
3512 | 88.7k | if (!vRecv.empty()) { Branch (3512:13): [True: 88.7k, False: 0]
|
3513 | 88.7k | vRecv >> starting_height; |
3514 | 88.7k | } |
3515 | 88.7k | if (!vRecv.empty()) Branch (3515:13): [True: 88.7k, False: 0]
|
3516 | 88.7k | vRecv >> fRelay; |
3517 | | // Disconnect if we connected to ourself |
3518 | 88.7k | if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) Branch (3518:13): [True: 44.3k, False: 44.3k]
Branch (3518:38): [True: 0, False: 44.3k]
|
3519 | 0 | { |
3520 | 0 | LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort()); |
3521 | 0 | pfrom.fDisconnect = true; |
3522 | 0 | return; |
3523 | 0 | } |
3524 | | |
3525 | 88.7k | if (pfrom.IsInboundConn() && addrMe.IsRoutable()) Branch (3525:13): [True: 44.3k, False: 44.3k]
Branch (3525:38): [True: 0, False: 44.3k]
|
3526 | 0 | { |
3527 | 0 | SeenLocal(addrMe); |
3528 | 0 | } |
3529 | | |
3530 | | // Inbound peers send us their version message when they connect. |
3531 | | // We send our version message in response. |
3532 | 88.7k | if (pfrom.IsInboundConn()) { Branch (3532:13): [True: 44.3k, False: 44.3k]
|
3533 | 44.3k | PushNodeVersion(pfrom, *peer); |
3534 | 44.3k | } |
3535 | | |
3536 | | // Change version |
3537 | 88.7k | const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION); |
3538 | 88.7k | pfrom.SetCommonVersion(greatest_common_version); |
3539 | 88.7k | pfrom.nVersion = nVersion; |
3540 | | |
3541 | 88.7k | if (greatest_common_version >= WTXID_RELAY_VERSION) { Branch (3541:13): [True: 88.7k, False: 0]
|
3542 | 88.7k | MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY); |
3543 | 88.7k | } |
3544 | | |
3545 | | // Signal ADDRv2 support (BIP155). |
3546 | 88.7k | if (greatest_common_version >= 70016) { Branch (3546:13): [True: 88.7k, False: 0]
|
3547 | | // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some |
3548 | | // implementations reject messages they don't know. As a courtesy, don't send |
3549 | | // it to nodes with a version before 70016, as no software is known to support |
3550 | | // BIP155 that doesn't announce at least that protocol version number. |
3551 | 88.7k | MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2); |
3552 | 88.7k | } |
3553 | | |
3554 | 88.7k | pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices); |
3555 | 88.7k | peer->m_their_services = nServices; |
3556 | 88.7k | pfrom.SetAddrLocal(addrMe); |
3557 | 88.7k | { |
3558 | 88.7k | LOCK(pfrom.m_subver_mutex); |
3559 | 88.7k | pfrom.cleanSubVer = cleanSubVer; |
3560 | 88.7k | } |
3561 | 88.7k | peer->m_starting_height = starting_height; |
3562 | | |
3563 | | // Only initialize the Peer::TxRelay m_relay_txs data structure if: |
3564 | | // - this isn't an outbound block-relay-only connection, and |
3565 | | // - this isn't an outbound feeler connection, and |
3566 | | // - fRelay=true (the peer wishes to receive transaction announcements) |
3567 | | // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that |
3568 | | // the peer may turn on transaction relay later. |
3569 | 88.7k | if (!pfrom.IsBlockOnlyConn() && Branch (3569:13): [True: 88.7k, False: 0]
|
3570 | 88.7k | !pfrom.IsFeelerConn() && Branch (3570:13): [True: 88.7k, False: 0]
|
3571 | 88.7k | (fRelay || (peer->m_our_services & NODE_BLOOM))) { Branch (3571:14): [True: 88.7k, False: 0]
Branch (3571:24): [True: 0, False: 0]
|
3572 | 88.7k | auto* const tx_relay = peer->SetTxRelay(); |
3573 | 88.7k | { |
3574 | 88.7k | LOCK(tx_relay->m_bloom_filter_mutex); |
3575 | 88.7k | tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message |
3576 | 88.7k | } |
3577 | 88.7k | if (fRelay) pfrom.m_relays_txs = true; Branch (3577:17): [True: 88.7k, False: 0]
|
3578 | 88.7k | } |
3579 | | |
3580 | 88.7k | if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) { Branch (3580:13): [True: 88.7k, False: 0]
Branch (3580:63): [True: 88.7k, False: 0]
|
3581 | | // Per BIP-330, we announce txreconciliation support if: |
3582 | | // - protocol version per the peer's VERSION message supports WTXID_RELAY; |
3583 | | // - transaction relay is supported per the peer's VERSION message |
3584 | | // - this is not a block-relay-only connection and not a feeler |
3585 | | // - this is not an addr fetch connection; |
3586 | | // - we are not in -blocksonly mode. |
3587 | 88.7k | const auto* tx_relay = peer->GetTxRelay(); |
3588 | 88.7k | if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) && Branch (3588:17): [True: 88.7k, False: 0]
Branch (3588:17): [True: 88.7k, False: 0]
|
3589 | 88.7k | !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) { Branch (3589:17): [True: 88.7k, False: 0]
Branch (3589:45): [True: 88.7k, False: 0]
|
3590 | 88.7k | const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId()); |
3591 | 88.7k | MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL, |
3592 | 88.7k | TXRECONCILIATION_VERSION, recon_salt); |
3593 | 88.7k | } |
3594 | 88.7k | } |
3595 | | |
3596 | 88.7k | MakeAndPushMessage(pfrom, NetMsgType::VERACK); |
3597 | | |
3598 | | // Potentially mark this peer as a preferred download peer. |
3599 | 88.7k | { |
3600 | 88.7k | LOCK(cs_main); |
3601 | 88.7k | CNodeState* state = State(pfrom.GetId()); |
3602 | 88.7k | state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer); Branch (3602:42): [True: 44.3k, False: 44.3k]
Branch (3602:68): [True: 0, False: 44.3k]
Branch (3602:119): [True: 44.3k, False: 0]
Branch (3602:147): [True: 44.3k, False: 0]
|
3603 | 88.7k | m_num_preferred_download_peers += state->fPreferredDownload; |
3604 | 88.7k | } |
3605 | | |
3606 | | // Attempt to initialize address relay for outbound peers and use result |
3607 | | // to decide whether to send GETADDR, so that we don't send it to |
3608 | | // inbound or outbound block-relay-only peers. |
3609 | 88.7k | bool send_getaddr{false}; |
3610 | 88.7k | if (!pfrom.IsInboundConn()) { Branch (3610:13): [True: 44.3k, False: 44.3k]
|
3611 | 44.3k | send_getaddr = SetupAddressRelay(pfrom, *peer); |
3612 | 44.3k | } |
3613 | 88.7k | if (send_getaddr) { Branch (3613:13): [True: 44.3k, False: 44.3k]
|
3614 | | // Do a one-time address fetch to help populate/update our addrman. |
3615 | | // If we're starting up for the first time, our addrman may be pretty |
3616 | | // empty, so this mechanism is important to help us connect to the network. |
3617 | | // We skip this for block-relay-only peers. We want to avoid |
3618 | | // potentially leaking addr information and we do not want to |
3619 | | // indicate to the peer that we will participate in addr relay. |
3620 | 44.3k | MakeAndPushMessage(pfrom, NetMsgType::GETADDR); |
3621 | 44.3k | peer->m_getaddr_sent = true; |
3622 | | // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response |
3623 | | // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit). |
3624 | 44.3k | peer->m_addr_token_bucket += MAX_ADDR_TO_SEND; |
3625 | 44.3k | } |
3626 | | |
3627 | 88.7k | if (!pfrom.IsInboundConn()) { Branch (3627:13): [True: 44.3k, False: 44.3k]
|
3628 | | // For non-inbound connections, we update the addrman to record |
3629 | | // connection success so that addrman will have an up-to-date |
3630 | | // notion of which peers are online and available. |
3631 | | // |
3632 | | // While we strive to not leak information about block-relay-only |
3633 | | // connections via the addrman, not moving an address to the tried |
3634 | | // table is also potentially detrimental because new-table entries |
3635 | | // are subject to eviction in the event of addrman collisions. We |
3636 | | // mitigate the information-leak by never calling |
3637 | | // AddrMan::Connected() on block-relay-only peers; see |
3638 | | // FinalizeNode(). |
3639 | | // |
3640 | | // This moves an address from New to Tried table in Addrman, |
3641 | | // resolves tried-table collisions, etc. |
3642 | 44.3k | m_addrman.Good(pfrom.addr); |
3643 | 44.3k | } |
3644 | | |
3645 | 88.7k | const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; |
3646 | 88.7k | LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n", |
3647 | 88.7k | cleanSubVer, pfrom.nVersion, |
3648 | 88.7k | peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(), |
3649 | 88.7k | pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); |
3650 | | |
3651 | 88.7k | peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>(); |
3652 | 88.7k | if (!pfrom.IsInboundConn()) { Branch (3652:13): [True: 44.3k, False: 44.3k]
|
3653 | | // Don't use timedata samples from inbound peers to make it |
3654 | | // harder for others to create false warnings about our clock being out of sync. |
3655 | 44.3k | m_outbound_time_offsets.Add(peer->m_time_offset); |
3656 | 44.3k | m_outbound_time_offsets.WarnIfOutOfSync(); |
3657 | 44.3k | } |
3658 | | |
3659 | | // If the peer is old enough to have the old alert system, send it the final alert. |
3660 | 88.7k | if (greatest_common_version <= 70012) { Branch (3660:13): [True: 0, False: 88.7k]
|
3661 | 0 | constexpr auto finalAlert{"60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"_hex}; |
3662 | 0 | MakeAndPushMessage(pfrom, "alert", finalAlert); |
3663 | 0 | } |
3664 | | |
3665 | | // Feeler connections exist only to verify if address is online. |
3666 | 88.7k | if (pfrom.IsFeelerConn()) { Branch (3666:13): [True: 0, False: 88.7k]
|
3667 | 0 | LogDebug(BCLog::NET, "feeler connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3668 | 0 | pfrom.fDisconnect = true; |
3669 | 0 | } |
3670 | 88.7k | return; |
3671 | 88.7k | } |
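
The VERSION handler above, together with the VERACK and WTXIDRELAY handlers below, enforces a strict handshake ordering: VERSION must come first, and some features (such as wtxidrelay) are only accepted before VERACK. A simplified sketch of that state machine with a hypothetical enum; the real per-message rules are more nuanced (e.g. sendcmpct is accepted at any time):

    enum class Handshake { AwaitingVersion, Negotiating, Complete };

    bool AcceptDuringHandshake(Handshake& state, bool is_version, bool is_verack,
                               bool pre_verack_only_feature)
    {
        switch (state) {
        case Handshake::AwaitingVersion:
            if (!is_version) return false; // "non-version message before version handshake"
            state = Handshake::Negotiating;
            return true;
        case Handshake::Negotiating:
            if (is_version) return false;  // redundant version: ignored
            if (is_verack) { state = Handshake::Complete; return true; }
            return true;                   // negotiation or other pre-verack traffic
        case Handshake::Complete:
            // e.g. wtxidrelay after verack triggers a disconnect in the real code
            return !is_version && !is_verack && !pre_verack_only_feature;
        }
        return false;
    }
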
3672 | | |
3673 | 5.42M | if (pfrom.nVersion == 0) { Branch (3673:9): [True: 0, False: 5.42M]
|
3674 | | // Must have a version message before anything else |
3675 | 0 | LogDebug(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); |
3676 | 0 | return; |
3677 | 0 | } |
3678 | | |
3679 | 5.42M | if (msg_type == NetMsgType::VERACK) { Branch (3679:9): [True: 88.9k, False: 5.33M]
|
3680 | 88.9k | if (pfrom.fSuccessfullyConnected) { Branch (3680:13): [True: 230, False: 88.7k]
|
3681 | 230 | LogDebug(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId()); |
3682 | 230 | return; |
3683 | 230 | } |
3684 | | |
3685 | | // Log successful connections unconditionally for outbound, but not for inbound as those |
3686 | | // can be triggered by an attacker at high rate. |
3687 | 88.7k | if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) { Branch (3687:13): [True: 44.3k, False: 44.3k]
Branch (3687:39): [True: 44.3k, False: 0]
|
3688 | 88.7k | const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; |
3689 | 88.7k | LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n", |
3690 | 88.7k | pfrom.ConnectionTypeAsString(), |
3691 | 88.7k | TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type), |
3692 | 88.7k | pfrom.nVersion.load(), peer->m_starting_height, |
3693 | 88.7k | pfrom.GetId(), pfrom.LogIP(fLogIPs), |
3694 | 88.7k | (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); |
3695 | 88.7k | } |
3696 | | |
3697 | 88.7k | if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { Branch (3697:13): [True: 88.7k, False: 0]
|
3698 | | // Tell our peer we are willing to provide version 2 cmpctblocks. |
3699 | | // However, we do not request new block announcements using |
3700 | | // cmpctblock messages. |
3701 | | // We send this to non-NODE NETWORK peers as well, because |
3702 | | // they may wish to request compact blocks from us |
3703 | 88.7k | MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION); |
3704 | 88.7k | } |
3705 | | |
3706 | 88.7k | if (m_txreconciliation) { Branch (3706:13): [True: 88.7k, False: 0]
|
3707 | 88.7k | if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) { Branch (3707:17): [True: 0, False: 88.7k]
Branch (3707:41): [True: 88.7k, False: 0]
|
3708 | | // We could have optimistically pre-registered/registered the peer. In that case, |
3709 | | // we should forget about the reconciliation state here if this wasn't followed |
3710 | | // by WTXIDRELAY (since WTXIDRELAY can't be announced later). |
3711 | 88.7k | m_txreconciliation->ForgetPeer(pfrom.GetId()); |
3712 | 88.7k | } |
3713 | 88.7k | } |
3714 | | |
3715 | 88.7k | if (auto tx_relay = peer->GetTxRelay()) { Branch (3715:18): [True: 88.7k, False: 0]
|
3716 | | // `TxRelay::m_tx_inventory_to_send` must be empty before the |
3717 | | // version handshake is completed as |
3718 | | // `TxRelay::m_next_inv_send_time` is first initialised in |
3719 | | // `SendMessages` after the verack is received. Any transactions |
3720 | | // received during the version handshake would otherwise |
3721 | | // immediately be advertised without random delay, potentially |
3722 | | // leaking the time of arrival to a spy. |
3723 | 88.7k | Assume(WITH_LOCK( |
3724 | 88.7k | tx_relay->m_tx_inventory_mutex, |
3725 | 88.7k | return tx_relay->m_tx_inventory_to_send.empty() && |
3726 | 88.7k | tx_relay->m_next_inv_send_time == 0s)); |
3727 | 88.7k | } |
3728 | | |
3729 | 88.7k | { |
3730 | 88.7k | LOCK2(::cs_main, m_tx_download_mutex); |
3731 | 88.7k | const CNodeState* state = State(pfrom.GetId()); |
3732 | 88.7k | m_txdownloadman.ConnectedPeer(pfrom.GetId(), node::TxDownloadConnectionInfo { |
3733 | 88.7k | .m_preferred = state->fPreferredDownload, |
3734 | 88.7k | .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay), |
3735 | 88.7k | .m_wtxid_relay = peer->m_wtxid_relay, |
3736 | 88.7k | }); |
3737 | 88.7k | } |
3738 | | |
3739 | 88.7k | pfrom.fSuccessfullyConnected = true; |
3740 | 88.7k | return; |
3741 | 88.9k | } |
3742 | | |
3743 | 5.33M | if (msg_type == NetMsgType::SENDHEADERS) { Branch (3743:9): [True: 188, False: 5.33M]
|
3744 | 188 | peer->m_prefers_headers = true; |
3745 | 188 | return; |
3746 | 188 | } |
3747 | | |
3748 | 5.33M | if (msg_type == NetMsgType::SENDCMPCT) { Branch (3748:9): [True: 88.9k, False: 5.24M]
|
3749 | 88.9k | bool sendcmpct_hb{false}; |
3750 | 88.9k | uint64_t sendcmpct_version{0}; |
3751 | 88.9k | vRecv >> sendcmpct_hb >> sendcmpct_version; |
3752 | | |
3753 | | // Only support compact block relay with witnesses |
3754 | 88.9k | if (sendcmpct_version != CMPCTBLOCKS_VERSION) return; Branch (3754:13): [True: 226, False: 88.7k]
|
3755 | | |
3756 | 88.7k | LOCK(cs_main); |
3757 | 88.7k | CNodeState* nodestate = State(pfrom.GetId()); |
3758 | 88.7k | nodestate->m_provides_cmpctblocks = true; |
3759 | 88.7k | nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb; |
3760 | | // save whether peer selects us as BIP152 high-bandwidth peer |
3761 | | // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth) |
3762 | 88.7k | pfrom.m_bip152_highbandwidth_from = sendcmpct_hb; |
3763 | 88.7k | return; |
3764 | 88.9k | } |
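
A compact sketch of the bookkeeping this handler performs, assuming CMPCTBLOCKS_VERSION == 2 as in the version check above; the struct and field names are illustrative stand-ins for the CNodeState/CNode members, not Core's API:

    #include <cstdint>

    // Per-peer BIP152 state recorded by the sendcmpct handler (illustrative).
    struct Bip152State {
        bool provides_cmpctblocks{false}; // peer can serve us compact blocks
        bool requested_hb{false};         // peer selected us as high-bandwidth
    };

    // The message carries a bool plus a 64-bit version; only the
    // witness-aware version is acted upon, anything else is ignored.
    bool ApplySendCmpct(Bip152State& st, bool high_bandwidth, uint64_t version)
    {
        constexpr uint64_t CMPCTBLOCKS_VERSION{2};
        if (version != CMPCTBLOCKS_VERSION) return false;
        st.provides_cmpctblocks = true;
        st.requested_hb = high_bandwidth;
        return true;
    }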
3765 | | |
3766 | | // BIP339 defines feature negotiation of wtxidrelay, which must happen between |
3767 | | // VERSION and VERACK to avoid relay problems from switching after a connection is up. |
3768 | 5.24M | if (msg_type == NetMsgType::WTXIDRELAY) { Branch (3768:9): [True: 88.8k, False: 5.15M]
|
3769 | 88.8k | if (pfrom.fSuccessfullyConnected) { Branch (3769:13): [True: 149, False: 88.7k]
|
3770 | | // Disconnect peers that send a wtxidrelay message after VERACK. |
3771 | 149 | LogDebug(BCLog::NET, "wtxidrelay received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3772 | 149 | pfrom.fDisconnect = true; |
3773 | 149 | return; |
3774 | 149 | } |
3775 | 88.7k | if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) { Branch (3775:13): [True: 88.7k, False: 0]
|
3776 | 88.7k | if (!peer->m_wtxid_relay) { Branch (3776:17): [True: 88.7k, False: 0]
|
3777 | 88.7k | peer->m_wtxid_relay = true; |
3778 | 88.7k | m_wtxid_relay_peers++; |
3779 | 88.7k | } else { |
3780 | 0 | LogDebug(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId()); |
3781 | 0 | } |
3782 | 88.7k | } else { |
3783 | 0 | LogDebug(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId()); |
3784 | 0 | } |
3785 | 88.7k | return; |
3786 | 88.8k | } |
3787 | | |
3788 | | // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen |
3789 | | // between VERSION and VERACK. |
3790 | 5.15M | if (msg_type == NetMsgType::SENDADDRV2) { Branch (3790:9): [True: 163, False: 5.15M]
|
3791 | 163 | if (pfrom.fSuccessfullyConnected) { Branch (3791:13): [True: 163, False: 0]
|
3792 | | // Disconnect peers that send a SENDADDRV2 message after VERACK. |
3793 | 163 | LogDebug(BCLog::NET, "sendaddrv2 received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3794 | 163 | pfrom.fDisconnect = true; |
3795 | 163 | return; |
3796 | 163 | } |
3797 | 0 | peer->m_wants_addrv2 = true; |
3798 | 0 | return; |
3799 | 163 | } |
3800 | | |
3801 | | // Received from a peer demonstrating readiness to announce transactions via reconciliations. |
3802 | | // This feature negotiation must happen between VERSION and VERACK to avoid relay problems |
3803 | | // from switching announcement protocols after the connection is up. |
3804 | 5.15M | if (msg_type == NetMsgType::SENDTXRCNCL) { Branch (3804:9): [True: 0, False: 5.15M]
|
3805 | 0 | if (!m_txreconciliation) { Branch (3805:13): [True: 0, False: 0]
|
3806 | 0 | LogDebug(BCLog::NET, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId()); |
3807 | 0 | return; |
3808 | 0 | } |
3809 | | |
3810 | 0 | if (pfrom.fSuccessfullyConnected) { Branch (3810:13): [True: 0, False: 0]
|
3811 | 0 | LogDebug(BCLog::NET, "sendtxrcncl received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3812 | 0 | pfrom.fDisconnect = true; |
3813 | 0 | return; |
3814 | 0 | } |
3815 | | |
3816 | | // Peer must not offer us reconciliations if we specified no tx relay support in VERSION. |
3817 | 0 | if (RejectIncomingTxs(pfrom)) { Branch (3817:13): [True: 0, False: 0]
|
3818 | 0 | LogDebug(BCLog::NET, "sendtxrcncl received to which we indicated no tx relay, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3819 | 0 | pfrom.fDisconnect = true; |
3820 | 0 | return; |
3821 | 0 | } |
3822 | | |
3823 | | // Peer must not offer us reconciliations if they specified no tx relay support in VERSION. |
3824 | | // This flag might also be false in other cases, but the RejectIncomingTxs check above |
3825 | | // eliminates them, so that this flag fully represents what we are looking for. |
3826 | 0 | const auto* tx_relay = peer->GetTxRelay(); |
3827 | 0 | if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) { Branch (3827:13): [True: 0, False: 0]
Branch (3827:13): [True: 0, False: 0]
Branch (3827:26): [True: 0, False: 0]
|
3828 | 0 | LogDebug(BCLog::NET, "sendtxrcncl received which indicated no tx relay to us, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3829 | 0 | pfrom.fDisconnect = true; |
3830 | 0 | return; |
3831 | 0 | } |
3832 | | |
3833 | 0 | uint32_t peer_txreconcl_version; |
3834 | 0 | uint64_t remote_salt; |
3835 | 0 | vRecv >> peer_txreconcl_version >> remote_salt; |
3836 | |
3837 | 0 | const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(), |
3838 | 0 | peer_txreconcl_version, remote_salt); |
3839 | 0 | switch (result) { Branch (3839:17): [True: 0, False: 0]
|
3840 | 0 | case ReconciliationRegisterResult::NOT_FOUND: Branch (3840:9): [True: 0, False: 0]
|
3841 | 0 | LogDebug(BCLog::NET, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId()); |
3842 | 0 | break; |
3843 | 0 | case ReconciliationRegisterResult::SUCCESS: Branch (3843:9): [True: 0, False: 0]
|
3844 | 0 | break; |
3845 | 0 | case ReconciliationRegisterResult::ALREADY_REGISTERED: Branch (3845:9): [True: 0, False: 0]
|
3846 | 0 | LogDebug(BCLog::NET, "txreconciliation protocol violation (sendtxrcncl received from already registered peer), %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3847 | 0 | pfrom.fDisconnect = true; |
3848 | 0 | return; |
3849 | 0 | case ReconciliationRegisterResult::PROTOCOL_VIOLATION: Branch (3849:9): [True: 0, False: 0]
|
3850 | 0 | LogDebug(BCLog::NET, "txreconciliation protocol violation, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3851 | 0 | pfrom.fDisconnect = true; |
3852 | 0 | return; |
3853 | 0 | } |
3854 | 0 | return; |
3855 | 0 | } |
3856 | | |
3857 | 5.15M | if (!pfrom.fSuccessfullyConnected) { Branch (3857:9): [True: 0, False: 5.15M]
|
3858 | 0 | LogDebug(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); |
3859 | 0 | return; |
3860 | 0 | } |
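
The handlers above jointly pin feature negotiation to the window between VERSION and VERACK: wtxidrelay, sendaddrv2, or sendtxrcncl arriving after VERACK causes a disconnect, while any other message arriving before VERACK is dropped. A minimal standalone sketch of that ordering rule, using illustrative types rather than Core's:

    #include <set>
    #include <string>

    struct HandshakeState {
        bool got_version{false};
        bool successfully_connected{false}; // set once VERACK is processed
    };

    enum class Action { Process, Ignore, Disconnect };

    Action Classify(const HandshakeState& s, const std::string& msg_type)
    {
        static const std::set<std::string> negotiation{
            "wtxidrelay", "sendaddrv2", "sendtxrcncl"};
        if (!s.got_version) {
            // Nothing but VERSION is meaningful before the handshake starts.
            return msg_type == "version" ? Action::Process : Action::Ignore;
        }
        if (negotiation.count(msg_type)) {
            // Negotiation after VERACK is treated as a violation above.
            return s.successfully_connected ? Action::Disconnect : Action::Process;
        }
        if (!s.successfully_connected) {
            return msg_type == "verack" ? Action::Process : Action::Ignore;
        }
        return Action::Process;
    }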
3861 | | |
3862 | 5.15M | if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) { Branch (3862:9): [True: 3.51k, False: 5.15M]
Branch (3862:41): [True: 844, False: 5.15M]
|
3863 | 4.35k | const auto ser_params{ |
3864 | 4.35k | msg_type == NetMsgType::ADDRV2 ? Branch (3864:13): [True: 844, False: 3.51k]
|
3865 | | // Set V2 param so that the CNetAddr and CAddress |
3866 | | // unserialize methods know that an address in v2 format is coming. |
3867 | 844 | CAddress::V2_NETWORK : |
3868 | 4.35k | CAddress::V1_NETWORK, |
3869 | 4.35k | }; |
3870 | | |
3871 | 4.35k | std::vector<CAddress> vAddr; |
3872 | | |
3873 | 4.35k | vRecv >> ser_params(vAddr); |
3874 | | |
3875 | 4.35k | if (!SetupAddressRelay(pfrom, *peer)) { Branch (3875:13): [True: 0, False: 4.35k]
|
3876 | 0 | LogDebug(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId()); |
3877 | 0 | return; |
3878 | 0 | } |
3879 | | |
3880 | 4.35k | if (vAddr.size() > MAX_ADDR_TO_SEND) Branch (3880:13): [True: 0, False: 4.35k]
|
3881 | 0 | { |
3882 | 0 | Misbehaving(*peer, strprintf("%s message size = %u", msg_type, vAddr.size())); |
3883 | 0 | return; |
3884 | 0 | } |
3885 | | |
3886 | | // Store the new addresses |
3887 | 4.35k | std::vector<CAddress> vAddrOk; |
3888 | 4.35k | const auto current_a_time{Now<NodeSeconds>()}; |
3889 | | |
3890 | | // Update/increment addr rate limiting bucket. |
3891 | 4.35k | const auto current_time{GetTime<std::chrono::microseconds>()}; |
3892 | 4.35k | if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) { Branch (3892:13): [True: 2.22k, False: 2.13k]
|
3893 | | // Don't increment bucket if it's already full |
3894 | 2.22k | const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us); |
3895 | 2.22k | const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND; |
3896 | 2.22k | peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET); |
3897 | 2.22k | } |
3898 | 4.35k | peer->m_addr_token_timestamp = current_time; |
3899 | | |
3900 | 4.35k | const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr); |
3901 | 4.35k | uint64_t num_proc = 0; |
3902 | 4.35k | uint64_t num_rate_limit = 0; |
3903 | 4.35k | std::shuffle(vAddr.begin(), vAddr.end(), m_rng); |
3904 | 4.35k | for (CAddress& addr : vAddr) Branch (3904:29): [True: 5.97k, False: 4.35k]
|
3905 | 5.97k | { |
3906 | 5.97k | if (interruptMsgProc) Branch (3906:17): [True: 0, False: 5.97k]
|
3907 | 0 | return; |
3908 | | |
3909 | | // Apply rate limiting. |
3910 | 5.97k | if (peer->m_addr_token_bucket < 1.0) { Branch (3910:17): [True: 28, False: 5.95k]
|
3911 | 28 | if (rate_limited) { Branch (3911:21): [True: 28, False: 0]
|
3912 | 28 | ++num_rate_limit; |
3913 | 28 | continue; |
3914 | 28 | } |
3915 | 5.95k | } else { |
3916 | 5.95k | peer->m_addr_token_bucket -= 1.0; |
3917 | 5.95k | } |
3918 | | // We only bother storing full nodes, though this may include |
3919 | | // things which we would not make an outbound connection to, in |
3920 | | // part because we may make feeler connections to them. |
3921 | 5.95k | if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices)) Branch (3921:17): [True: 292, False: 5.65k]
Branch (3921:60): [True: 292, False: 0]
|
3922 | 292 | continue; |
3923 | | |
3924 | 5.65k | if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) { Branch (3924:17): [True: 287, False: 5.37k]
Branch (3924:17): [True: 2.24k, False: 3.41k]
Branch (3924:58): [True: 1.95k, False: 3.41k]
|
3925 | 2.24k | addr.nTime = current_a_time - 5 * 24h; |
3926 | 2.24k | } |
3927 | 5.65k | AddAddressKnown(*peer, addr); |
3928 | 5.65k | if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) { Branch (3928:17): [True: 5.65k, False: 0]
Branch (3928:30): [True: 0, False: 5.65k]
Branch (3928:63): [True: 0, False: 5.65k]
|
3929 | | // Do not process banned/discouraged addresses beyond remembering we received them |
3930 | 0 | continue; |
3931 | 0 | } |
3932 | 5.65k | ++num_proc; |
3933 | 5.65k | const bool reachable{g_reachable_nets.Contains(addr)}; |
3934 | 5.65k | if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) { Branch (3934:17): [True: 79, False: 5.57k]
Branch (3934:17): [True: 69, False: 5.58k]
Branch (3934:56): [True: 73, False: 6]
Branch (3934:81): [True: 73, False: 0]
Branch (3934:103): [True: 69, False: 4]
|
3935 | | // Relay to a limited number of other nodes |
3936 | 69 | RelayAddress(pfrom.GetId(), addr, reachable); |
3937 | 69 | } |
3938 | | // Do not store addresses outside our network |
3939 | 5.65k | if (reachable) { Branch (3939:17): [True: 5.64k, False: 13]
|
3940 | 5.64k | vAddrOk.push_back(addr); |
3941 | 5.64k | } |
3942 | 5.65k | } |
3943 | 4.35k | peer->m_addr_processed += num_proc; |
3944 | 4.35k | peer->m_addr_rate_limited += num_rate_limit; |
3945 | 4.35k | LogDebug(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n", |
3946 | 4.35k | vAddr.size(), num_proc, num_rate_limit, pfrom.GetId()); |
3947 | | |
3948 | 4.35k | m_addrman.Add(vAddrOk, pfrom.addr, 2h); |
3949 | 4.35k | if (vAddr.size() < 1000) peer->m_getaddr_sent = false; Branch (3949:13): [True: 3.18k, False: 1.16k]
|
3950 | | |
3951 | | // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements |
3952 | 4.35k | if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) { Branch (3952:13): [True: 0, False: 4.35k]
Branch (3952:40): [True: 0, False: 0]
|
3953 | 0 | LogDebug(BCLog::NET, "addrfetch connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
3954 | 0 | pfrom.fDisconnect = true; |
3955 | 0 | } |
3956 | 4.35k | return; |
3957 | 4.35k | } |
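
The bucket logic above is a standard token bucket: it refills in proportion to elapsed time, is capped, and each processed address spends one token (peers with Addr permission bypass the limit). A standalone sketch; the refill rate and cap are assumed values standing in for MAX_ADDR_RATE_PER_SECOND and MAX_ADDR_PROCESSING_TOKEN_BUCKET:

    #include <algorithm>
    #include <chrono>

    struct AddrTokenBucket {
        double tokens{1000.0}; // starting value assumed for the sketch
        std::chrono::steady_clock::time_point last_refill{std::chrono::steady_clock::now()};

        // Called once per addr message, mirroring the handler above.
        void Refill(std::chrono::steady_clock::time_point now,
                    double rate = 0.1, double cap = 1000.0)
        {
            const std::chrono::duration<double> dt{now - last_refill};
            tokens = std::min(tokens + std::max(dt.count(), 0.0) * rate, cap);
            last_refill = now;
        }

        // One token per address; an empty bucket means the address is skipped.
        bool Spend()
        {
            if (tokens < 1.0) return false;
            tokens -= 1.0;
            return true;
        }
    };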
3958 | | |
3959 | 5.15M | if (msg_type == NetMsgType::INV) { Branch (3959:9): [True: 465k, False: 4.68M]
|
3960 | 465k | std::vector<CInv> vInv; |
3961 | 465k | vRecv >> vInv; |
3962 | 465k | if (vInv.size() > MAX_INV_SZ) Branch (3962:13): [True: 0, False: 465k]
|
3963 | 0 | { |
3964 | 0 | Misbehaving(*peer, strprintf("inv message size = %u", vInv.size())); |
3965 | 0 | return; |
3966 | 0 | } |
3967 | | |
3968 | 465k | const bool reject_tx_invs{RejectIncomingTxs(pfrom)}; |
3969 | | |
3970 | 465k | LOCK2(cs_main, m_tx_download_mutex); |
3971 | | |
3972 | 465k | const auto current_time{GetTime<std::chrono::microseconds>()}; |
3973 | 465k | uint256* best_block{nullptr}; |
3974 | | |
3975 | 608k | for (CInv& inv : vInv) { Branch (3975:24): [True: 608k, False: 465k]
|
3976 | 608k | if (interruptMsgProc) return; Branch (3976:17): [True: 0, False: 608k]
|
3977 | | |
3978 | | // Ignore INVs that don't match wtxidrelay setting. |
3979 | | // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting. |
3980 | | // This is fine as no INV messages are involved in that process. |
3981 | 608k | if (peer->m_wtxid_relay) { Branch (3981:17): [True: 608k, False: 0]
|
3982 | 608k | if (inv.IsMsgTx()) continue; Branch (3982:21): [True: 49.4k, False: 559k]
|
3983 | 608k | } else { |
3984 | 0 | if (inv.IsMsgWtx()) continue; Branch (3984:21): [True: 0, False: 0]
|
3985 | 0 | } |
3986 | | |
3987 | 559k | if (inv.IsMsgBlk()) { Branch (3987:17): [True: 91.6k, False: 467k]
|
3988 | 91.6k | const bool fAlreadyHave = AlreadyHaveBlock(inv.hash); |
3989 | 91.6k | LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); |
3990 | | |
3991 | 91.6k | UpdateBlockAvailability(pfrom.GetId(), inv.hash); |
3992 | 91.6k | if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) { Branch (3992:21): [True: 509, False: 91.0k]
Branch (3992:38): [True: 509, False: 0]
Branch (3992:80): [True: 509, False: 0]
|
3993 | | // Headers-first is the primary method of announcement on |
3994 | | // the network. If a node fell back to sending blocks by |
3995 | | // inv, it may be for a re-org, or because we haven't |
3996 | | // completed initial headers sync. The final block hash |
3997 | | // provided should be the highest, so send a getheaders and |
3998 | | // then fetch the blocks we need to catch up. |
3999 | 509 | best_block = &inv.hash; |
4000 | 509 | } |
4001 | 467k | } else if (inv.IsGenTxMsg()) { Branch (4001:24): [True: 465k, False: 2.72k]
|
4002 | 465k | if (reject_tx_invs) { Branch (4002:21): [True: 0, False: 465k]
|
4003 | 0 | LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, %s\n", inv.hash.ToString(), pfrom.DisconnectMsg(fLogIPs)); |
4004 | 0 | pfrom.fDisconnect = true; |
4005 | 0 | return; |
4006 | 0 | } |
4007 | 465k | const GenTxid gtxid = ToGenTxid(inv); |
4008 | 465k | AddKnownTx(*peer, inv.hash); |
4009 | | |
4010 | 465k | if (!m_chainman.IsInitialBlockDownload()) { Branch (4010:21): [True: 465k, False: 0]
|
4011 | 465k | const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time)}; |
4012 | 465k | LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); |
4013 | 465k | } |
4014 | 465k | } else { |
4015 | 2.72k | LogDebug(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId()); |
4016 | 2.72k | } |
4017 | 559k | } |
4018 | | |
4019 | 465k | if (best_block != nullptr) { Branch (4019:13): [True: 346, False: 464k]
|
4020 | | // If we haven't started initial headers-sync with this peer, then |
4021 | | // consider sending a getheaders now. On initial startup, there's a |
4022 | | // reliability vs bandwidth tradeoff, where we are only trying to do |
4023 | | // initial headers sync with one peer at a time, with a long |
4024 | | // timeout (at which point, if the sync hasn't completed, we will |
4025 | | // disconnect the peer and then choose another). In the meantime, |
4026 | | // as new blocks are found, we are willing to add one new peer per |
4027 | | // block to sync with as well, to sync quicker in the case where |
4028 | | // our initial peer is unresponsive (but less bandwidth than we'd |
4029 | | // use if we turned on sync with all peers). |
4030 | 346 | CNodeState& state{*Assert(State(pfrom.GetId()))}; |
4031 | 346 | if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) { Branch (4031:17): [True: 346, False: 0]
Branch (4031:40): [True: 0, False: 0]
Branch (4031:89): [True: 0, False: 0]
|
4032 | 346 | if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) { Branch (4032:21): [True: 308, False: 38]
|
4033 | 308 | LogDebug(BCLog::NET, "getheaders (%d) %s to peer=%d\n", |
4034 | 308 | m_chainman.m_best_header->nHeight, best_block->ToString(), |
4035 | 308 | pfrom.GetId()); |
4036 | 308 | } |
4037 | 346 | if (!state.fSyncStarted) { Branch (4037:21): [True: 0, False: 346]
|
4038 | 0 | peer->m_inv_triggered_getheaders_before_sync = true; |
4039 | | // Update the last block hash that triggered a new headers |
4040 | | // sync, so that we don't turn on headers sync with more |
4041 | | // than 1 new peer every new block. |
4042 | 0 | m_last_block_inv_triggering_headers_sync = *best_block; |
4043 | 0 | } |
4044 | 346 | } |
4045 | 346 | } |
4046 | | |
4047 | 465k | return; |
4048 | 465k | } |
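
The best_block branch above implements a small throttle: a peer already marked fSyncStarted may always be asked for headers, while a fresh peer is drafted into headers sync only if no other fresh peer was already drafted for the same announced block. A simplified sketch (strings stand in for block hashes, and the state updates are condensed rather than reproduced exactly):

    #include <string>

    struct HeadersSyncThrottle {
        std::string last_trigger; // m_last_block_inv_triggering_headers_sync
    };

    struct PeerView {
        bool sync_started{false};         // CNodeState::fSyncStarted
        bool inv_triggered_before{false}; // Peer::m_inv_triggered_getheaders_before_sync
    };

    bool ConsiderGetHeaders(HeadersSyncThrottle& g, PeerView& p,
                            const std::string& best_block)
    {
        if (!p.sync_started) {
            if (p.inv_triggered_before || best_block == g.last_trigger) return false;
            p.inv_triggered_before = true;
            g.last_trigger = best_block; // at most one new sync peer per block
        }
        return true;
    }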
4049 | | |
4050 | 4.68M | if (msg_type == NetMsgType::GETDATA) { Branch (4050:9): [True: 6.10k, False: 4.68M]
|
4051 | 6.10k | std::vector<CInv> vInv; |
4052 | 6.10k | vRecv >> vInv; |
4053 | 6.10k | if (vInv.size() > MAX_INV_SZ) Branch (4053:13): [True: 0, False: 6.10k]
|
4054 | 0 | { |
4055 | 0 | Misbehaving(*peer, strprintf("getdata message size = %u", vInv.size())); |
4056 | 0 | return; |
4057 | 0 | } |
4058 | | |
4059 | 6.10k | LogDebug(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId()); |
4060 | | |
4061 | 6.10k | if (vInv.size() > 0) { Branch (4061:13): [True: 5.82k, False: 284]
|
4062 | 5.82k | LogDebug(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId()); |
4063 | 5.82k | } |
4064 | | |
4065 | 6.10k | { |
4066 | 6.10k | LOCK(peer->m_getdata_requests_mutex); |
4067 | 6.10k | peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end()); |
4068 | 6.10k | ProcessGetData(pfrom, *peer, interruptMsgProc); |
4069 | 6.10k | } |
4070 | | |
4071 | 6.10k | return; |
4072 | 6.10k | } |
4073 | | |
4074 | 4.68M | if (msg_type == NetMsgType::GETBLOCKS) { Branch (4074:9): [True: 257, False: 4.68M]
|
4075 | 257 | CBlockLocator locator; |
4076 | 257 | uint256 hashStop; |
4077 | 257 | vRecv >> locator >> hashStop; |
4078 | | |
4079 | 257 | if (locator.vHave.size() > MAX_LOCATOR_SZ) { Branch (4079:13): [True: 0, False: 257]
|
4080 | 0 | LogDebug(BCLog::NET, "getblocks locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs)); |
4081 | 0 | pfrom.fDisconnect = true; |
4082 | 0 | return; |
4083 | 0 | } |
4084 | | |
4085 | | // We might have announced the currently-being-connected tip using a |
4086 | | // compact block, which resulted in the peer sending a getblocks |
4087 | | // request, which we would otherwise respond to without the new block. |
4088 | | // To avoid this situation we simply verify that we are on our best |
4089 | | // known chain now. This is super overkill, but we handle it better |
4090 | | // for getheaders requests, and there are no known nodes which support |
4091 | | // compact blocks but still use getblocks to request blocks. |
4092 | 257 | { |
4093 | 257 | std::shared_ptr<const CBlock> a_recent_block; |
4094 | 257 | { |
4095 | 257 | LOCK(m_most_recent_block_mutex); |
4096 | 257 | a_recent_block = m_most_recent_block; |
4097 | 257 | } |
4098 | 257 | BlockValidationState state; |
4099 | 257 | if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { Branch (4099:17): [True: 0, False: 257]
|
4100 | 0 | LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); |
4101 | 0 | } |
4102 | 257 | } |
4103 | | |
4104 | 257 | LOCK(cs_main); |
4105 | | |
4106 | | // Find the last block the caller has in the main chain |
4107 | 257 | const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator); |
4108 | | |
4109 | | // Send the rest of the chain |
4110 | 257 | if (pindex) Branch (4110:13): [True: 49, False: 208]
|
4111 | 49 | pindex = m_chainman.ActiveChain().Next(pindex); |
4112 | 257 | int nLimit = 500; |
4113 | 257 | LogDebug(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId()); |
4114 | 10.0k | for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) Branch (4114:16): [True: 9.80k, False: 257]
|
4115 | 9.80k | { |
4116 | 9.80k | if (pindex->GetBlockHash() == hashStop) Branch (4116:17): [True: 0, False: 9.80k]
|
4117 | 0 | { |
4118 | 0 | LogDebug(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); |
4119 | 0 | break; |
4120 | 0 | } |
4121 | | // If pruning, don't inv blocks unless we have them on disk and are likely to still |
4122 | | // have them for some reasonable time window (1 hour) that block relay might require. |
4123 | 9.80k | const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing; |
4124 | 9.80k | if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) { Branch (4124:17): [True: 0, False: 9.80k]
Branch (4124:57): [True: 0, False: 0]
Branch (4124:97): [True: 0, False: 0]
|
4125 | 0 | LogDebug(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); |
4126 | 0 | break; |
4127 | 0 | } |
4128 | 9.80k | WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash())); |
4129 | 9.80k | if (--nLimit <= 0) { Branch (4129:17): [True: 0, False: 9.80k]
|
4130 | | // When this block is requested, we'll send an inv that'll |
4131 | | // trigger the peer to getblocks the next batch of inventory. |
4132 | 0 | LogDebug(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); |
4133 | 0 | WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();}); |
4134 | 0 | break; |
4135 | 0 | } |
4136 | 9.80k | } |
4137 | 257 | return; |
4138 | 257 | } |
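
A standalone sketch of the batching above: at most 500 hashes are queued per getblocks, and when the limit is hit the last announced hash is remembered as a continuation point, so the inv for that block prompts the peer to getblocks the next batch (strings stand in for block hashes):

    #include <optional>
    #include <string>
    #include <vector>

    struct GetBlocksReply {
        std::vector<std::string> invs;           // hashes queued for inv relay
        std::optional<std::string> continuation; // set when the limit was hit
    };

    GetBlocksReply ServeGetBlocks(const std::vector<std::string>& chain_after_fork,
                                  const std::string& hash_stop, int limit = 500)
    {
        GetBlocksReply out;
        for (const auto& hash : chain_after_fork) {
            if (hash == hash_stop) break; // the stop block itself is not announced
            out.invs.push_back(hash);
            if (--limit <= 0) {
                out.continuation = hash; // resume here on the peer's next getblocks
                break;
            }
        }
        return out;
    }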
4139 | | |
4140 | 4.68M | if (msg_type == NetMsgType::GETBLOCKTXN) { Branch (4140:9): [True: 222, False: 4.68M]
|
4141 | 222 | BlockTransactionsRequest req; |
4142 | 222 | vRecv >> req; |
4143 | | |
4144 | 222 | std::shared_ptr<const CBlock> recent_block; |
4145 | 222 | { |
4146 | 222 | LOCK(m_most_recent_block_mutex); |
4147 | 222 | if (m_most_recent_block_hash == req.blockhash) Branch (4147:17): [True: 0, False: 222]
|
4148 | 0 | recent_block = m_most_recent_block; |
4149 | | // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion |
4150 | 222 | } |
4151 | 222 | if (recent_block) { Branch (4151:13): [True: 0, False: 222]
|
4152 | 0 | SendBlockTransactions(pfrom, *peer, *recent_block, req); |
4153 | 0 | return; |
4154 | 0 | } |
4155 | | |
4156 | 222 | FlatFilePos block_pos{}; |
4157 | 222 | { |
4158 | 222 | LOCK(cs_main); |
4159 | | |
4160 | 222 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash); |
4161 | 222 | if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) { Branch (4161:17): [True: 222, False: 0]
Branch (4161:28): [True: 0, False: 0]
|
4162 | 52 | LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); |
4163 | 52 | return; |
4164 | 52 | } |
4165 | | |
4166 | 170 | if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) { Branch (4166:17): [True: 0, False: 170]
|
4167 | 0 | block_pos = pindex->GetBlockPos(); |
4168 | 0 | } |
4169 | 170 | } |
4170 | | |
4171 | 170 | if (!block_pos.IsNull()) { Branch (4171:13): [True: 0, False: 170]
|
4172 | 0 | CBlock block; |
4173 | 0 | const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos)}; |
4174 | | // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get |
4175 | | // pruned after we release cs_main above, so this read should never fail. |
4176 | 0 | assert(ret); Branch (4176:13): [True: 0, False: 0]
|
4177 | | |
4178 | 0 | SendBlockTransactions(pfrom, *peer, block, req); |
4179 | 0 | return; |
4180 | 0 | } |
4181 | | |
4182 | | // If an older block is requested (should never happen in practice, |
4183 | | // but can happen in tests), send a block response instead of a |
4184 | | // blocktxn response. Sending a full block response instead of a |
4185 | | // small blocktxn response is preferable in the case where a peer |
4186 | | // might maliciously send lots of getblocktxn requests to trigger |
4187 | | // expensive disk reads, because it will require the peer to |
4188 | | // actually receive all the data read from disk over the network. |
4189 | 170 | LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); |
4190 | 170 | CInv inv{MSG_WITNESS_BLOCK, req.blockhash}; |
4191 | 170 | WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv)); |
4192 | | // The message processing loop will go around again (without pausing) and we'll respond then |
4193 | 170 | return; |
4194 | 170 | } |
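
The depth cutoff is doing the anti-DoS work here, as the comment above explains. A tiny sketch of the rule; the exact value of MAX_BLOCKTXN_DEPTH is an assumption of the sketch:

    // Serve a blocktxn only for blocks near the tip; deeper requests get a
    // full block, so a peer triggering disk reads must also receive the data.
    bool ServeAsBlockTxn(int block_height, int tip_height, int max_blocktxn_depth = 10)
    {
        return block_height >= tip_height - max_blocktxn_depth;
    }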
4195 | | |
4196 | 4.68M | if (msg_type == NetMsgType::GETHEADERS) { Branch (4196:9): [True: 323, False: 4.68M]
|
4197 | 323 | CBlockLocator locator; |
4198 | 323 | uint256 hashStop; |
4199 | 323 | vRecv >> locator >> hashStop; |
4200 | | |
4201 | 323 | if (locator.vHave.size() > MAX_LOCATOR_SZ) { Branch (4201:13): [True: 0, False: 323]
|
4202 | 0 | LogDebug(BCLog::NET, "getheaders locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs)); |
4203 | 0 | pfrom.fDisconnect = true; |
4204 | 0 | return; |
4205 | 0 | } |
4206 | | |
4207 | 323 | if (m_chainman.m_blockman.LoadingBlocks()) { Branch (4207:13): [True: 0, False: 323]
|
4208 | 0 | LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId()); |
4209 | 0 | return; |
4210 | 0 | } |
4211 | | |
4212 | 323 | LOCK(cs_main); |
4213 | | |
4214 | | // Don't serve headers from our active chain until our chainwork is at least |
4215 | | // the minimum chain work. This prevents us from starting a low-work headers |
4216 | | // sync that will inevitably be aborted by our peer. |
4217 | 323 | if (m_chainman.ActiveTip() == nullptr || Branch (4217:13): [True: 260, False: 63]
|
4218 | 323 | (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) { Branch (4218:18): [True: 0, False: 63]
Branch (4218:88): [True: 0, False: 0]
|
4219 | 0 | LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId()); |
4220 | | // Just respond with an empty headers message, to tell the peer to |
4221 | | // go away but not treat us as unresponsive. |
4222 | 0 | MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>()); |
4223 | 0 | return; |
4224 | 0 | } |
4225 | | |
4226 | 323 | CNodeState *nodestate = State(pfrom.GetId()); |
4227 | 323 | const CBlockIndex* pindex = nullptr; |
4228 | 323 | if (locator.IsNull()) Branch (4228:13): [True: 38, False: 285]
|
4229 | 38 | { |
4230 | | // If locator is null, return the hashStop block |
4231 | 38 | pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop); |
4232 | 38 | if (!pindex) { Branch (4232:17): [True: 38, False: 0]
|
4233 | 38 | return; |
4234 | 38 | } |
4235 | | |
4236 | 0 | if (!BlockRequestAllowed(pindex)) { Branch (4236:17): [True: 0, False: 0]
|
4237 | 0 | LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId()); |
4238 | 0 | return; |
4239 | 0 | } |
4240 | 0 | } |
4241 | 285 | else |
4242 | 285 | { |
4243 | | // Find the last block the caller has in the main chain |
4244 | 285 | pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator); |
4245 | 285 | if (pindex) Branch (4245:17): [True: 25, False: 260]
|
4246 | 25 | pindex = m_chainman.ActiveChain().Next(pindex); |
4247 | 285 | } |
4248 | | |
4249 | | // We must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end |
4250 | 285 | std::vector<CBlock> vHeaders; |
4251 | 285 | int nLimit = m_opts.max_headers_result; |
4252 | 285 | LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId()); |
4253 | 5.28k | for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) Branch (4253:16): [True: 5.00k, False: 285]
|
4254 | 5.00k | { |
4255 | 5.00k | vHeaders.emplace_back(pindex->GetBlockHeader()); |
4256 | 5.00k | if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) Branch (4256:17): [True: 0, False: 5.00k]
Branch (4256:17): [True: 0, False: 5.00k]
Branch (4256:34): [True: 0, False: 5.00k]
|
4257 | 0 | break; |
4258 | 5.00k | } |
4259 | | // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR |
4260 | | // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty |
4261 | | // headers message). In both cases it's safe to update |
4262 | | // pindexBestHeaderSent to be our tip. |
4263 | | // |
4264 | | // It is important that we simply reset the BestHeaderSent value here, |
4265 | | // and not max(BestHeaderSent, newHeaderSent). We might have announced |
4266 | | // the currently-being-connected tip using a compact block, which |
4267 | | // resulted in the peer sending a headers request, which we respond to |
4268 | | // without the new block. By resetting the BestHeaderSent, we ensure we |
4269 | | // will re-announce the new block via headers (or compact blocks again) |
4270 | | // in the SendMessages logic. |
4271 | 285 | nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip(); Branch (4271:43): [True: 0, False: 285]
|
4272 | 285 | MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders)); |
4273 | 285 | return; |
4274 | 323 | } |
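
A standalone sketch of the serving walk above: starting one block past the fork point, headers are collected until either the response limit or hashStop is reached (strings stand in for headers and hashes; the real limit is m_opts.max_headers_result):

    #include <string>
    #include <vector>

    std::vector<std::string> ServeGetHeaders(const std::vector<std::string>& active_chain,
                                             size_t fork_point_plus_one,
                                             const std::string& hash_stop, int limit)
    {
        std::vector<std::string> headers;
        for (size_t i = fork_point_plus_one; i < active_chain.size(); ++i) {
            headers.push_back(active_chain[i]); // hash_stop itself is included
            if (--limit <= 0 || active_chain[i] == hash_stop) break;
        }
        return headers;
    }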
4275 | | |
4276 | 4.68M | if (msg_type == NetMsgType::TX) { Branch (4276:9): [True: 390k, False: 4.29M]
|
4277 | 390k | if (RejectIncomingTxs(pfrom)) { Branch (4277:13): [True: 0, False: 390k]
|
4278 | 0 | LogDebug(BCLog::NET, "transaction sent in violation of protocol, %s", pfrom.DisconnectMsg(fLogIPs)); |
4279 | 0 | pfrom.fDisconnect = true; |
4280 | 0 | return; |
4281 | 0 | } |
4282 | | |
4283 | | // Stop processing the transaction early if we are still in IBD since we don't |
4284 | | // have enough information to validate it yet. Sending unsolicited transactions |
4285 | | // is not considered a protocol violation, so don't punish the peer. |
4286 | 390k | if (m_chainman.IsInitialBlockDownload()) return; Branch (4286:13): [True: 0, False: 390k]
|
4287 | | |
4288 | 390k | CTransactionRef ptx; |
4289 | 390k | vRecv >> TX_WITH_WITNESS(ptx); |
4290 | 390k | const CTransaction& tx = *ptx; |
4291 | | |
4292 | 390k | const uint256& txid = ptx->GetHash(); |
4293 | 390k | const uint256& wtxid = ptx->GetWitnessHash(); |
4294 | | |
4295 | 390k | const uint256& hash = peer->m_wtxid_relay ? wtxid : txid; Branch (4295:31): [True: 389k, False: 1.04k]
|
4296 | 390k | AddKnownTx(*peer, hash); |
4297 | | |
4298 | 390k | LOCK2(cs_main, m_tx_download_mutex); |
4299 | | |
4300 | 390k | const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx); |
4301 | 390k | if (!should_validate) { Branch (4301:13): [True: 10.2k, False: 379k]
|
4302 | 10.2k | if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) { Branch (4302:17): [True: 0, False: 10.2k]
|
4303 | | // Always relay transactions received from peers with forcerelay |
4304 | | // permission, even if they were already in the mempool, allowing |
4305 | | // the node to function as a gateway for nodes hidden behind it. |
4306 | 0 | if (!m_mempool.exists(GenTxid::Txid(tx.GetHash()))) { Branch (4306:21): [True: 0, False: 0]
|
4307 | 0 | LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n", |
4308 | 0 | tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId()); |
4309 | 0 | } else { |
4310 | 0 | LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n", |
4311 | 0 | tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId()); |
4312 | 0 | RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); |
4313 | 0 | } |
4314 | 0 | } |
4315 | | |
4316 | 10.2k | if (package_to_validate) { Branch (4316:17): [True: 385, False: 9.91k]
|
4317 | 385 | const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; |
4318 | 385 | LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), |
4319 | 385 | package_result.m_state.IsValid() ? "package accepted" : "package rejected"); |
4320 | 385 | ProcessPackageResult(package_to_validate.value(), package_result); |
4321 | 385 | } |
4322 | 10.2k | return; |
4323 | 10.2k | } |
4324 | | |
4325 | | // ReceivedTx should not be telling us to validate both the tx and a package. |
4326 | 379k | Assume(!package_to_validate.has_value()); |
4327 | | |
4328 | 379k | const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx); |
4329 | 379k | const TxValidationState& state = result.m_state; |
4330 | | |
4331 | 379k | if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { Branch (4331:13): [True: 35.1k, False: 344k]
|
4332 | 35.1k | ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions); |
4333 | 35.1k | pfrom.m_last_tx_time = GetTime<std::chrono::seconds>(); |
4334 | 35.1k | } |
4335 | 379k | if (state.IsInvalid()) { Branch (4335:13): [True: 343k, False: 36.1k]
|
4336 | 343k | if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, /*first_time_failure=*/true)}) { Branch (4336:22): [True: 2.15k, False: 341k]
|
4337 | 2.15k | const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; |
4338 | 2.15k | LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), |
4339 | 2.15k | package_result.m_state.IsValid() ? "package accepted" : "package rejected"); |
4340 | 2.15k | ProcessPackageResult(package_to_validate.value(), package_result); |
4341 | 2.15k | } |
4342 | 343k | } |
4343 | | |
4344 | 379k | return; |
4345 | 390k | } |
4346 | | |
4347 | 4.29M | if (msg_type == NetMsgType::CMPCTBLOCK) Branch (4347:9): [True: 269, False: 4.29M]
|
4348 | 269 | { |
4349 | | // Ignore cmpctblock received while importing |
4350 | 269 | if (m_chainman.m_blockman.LoadingBlocks()) { Branch (4350:13): [True: 0, False: 269]
|
4351 | 0 | LogDebug(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId()); |
4352 | 0 | return; |
4353 | 0 | } |
4354 | | |
4355 | 269 | CBlockHeaderAndShortTxIDs cmpctblock; |
4356 | 269 | vRecv >> cmpctblock; |
4357 | | |
4358 | 269 | bool received_new_header = false; |
4359 | 269 | const auto blockhash = cmpctblock.header.GetHash(); |
4360 | | |
4361 | 269 | { |
4362 | 269 | LOCK(cs_main); |
4363 | | |
4364 | 269 | const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock); |
4365 | 269 | if (!prev_block) { Branch (4365:13): [True: 16, False: 253]
|
4366 | | // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers |
4367 | 16 | if (!m_chainman.IsInitialBlockDownload()) { Branch (4367:17): [True: 16, False: 0]
|
4368 | 16 | MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer); |
4369 | 16 | } |
4370 | 16 | return; |
4371 | 253 | } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({{cmpctblock.header}}) < GetAntiDoSWorkThreshold()) { Branch (4371:20): [True: 0, False: 253]
|
4372 | | // If we get a low-work header in a compact block, we can ignore it. |
4373 | 0 | LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId()); |
4374 | 0 | return; |
4375 | 0 | } |
4376 | | |
4377 | 253 | if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) { Branch (4377:13): [True: 0, False: 253]
|
4378 | 0 | received_new_header = true; |
4379 | 0 | } |
4380 | 253 | } |
4381 | | |
4382 | 0 | const CBlockIndex *pindex = nullptr; |
4383 | 253 | BlockValidationState state; |
4384 | 253 | if (!m_chainman.ProcessNewBlockHeaders({{cmpctblock.header}}, /*min_pow_checked=*/true, state, &pindex)) { Branch (4384:13): [True: 0, False: 253]
|
4385 | 0 | if (state.IsInvalid()) { Branch (4385:17): [True: 0, False: 0]
|
4386 | 0 | MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock"); |
4387 | 0 | return; |
4388 | 0 | } |
4389 | 0 | } |
4390 | | |
4391 | | // If AcceptBlockHeader returned true, it set pindex |
4392 | 253 | Assert(pindex); |
4393 | 253 | if (received_new_header) { Branch (4393:13): [True: 0, False: 253]
|
4394 | 0 | LogBlockHeader(*pindex, pfrom, /*via_compact_block=*/true); |
4395 | 0 | } |
4396 | | |
4397 | 253 | bool fProcessBLOCKTXN = false; |
4398 | | |
4399 | | // If we end up treating this as a plain headers message, call that as well |
4400 | | // without cs_main. |
4401 | 253 | bool fRevertToHeaderProcessing = false; |
4402 | | |
4403 | | // Keep a CBlock for "optimistic" compactblock reconstructions (see |
4404 | | // below) |
4405 | 253 | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
4406 | 253 | bool fBlockReconstructed = false; |
4407 | | |
4408 | 253 | { |
4409 | 253 | LOCK(cs_main); |
4410 | 253 | UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash()); |
4411 | | |
4412 | 253 | CNodeState *nodestate = State(pfrom.GetId()); |
4413 | | |
4414 | | // If this was a new header with more work than our tip, update the |
4415 | | // peer's last block announcement time |
4416 | 253 | if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) { Branch (4416:13): [True: 0, False: 253]
Branch (4416:36): [True: 0, False: 0]
|
4417 | 0 | nodestate->m_last_block_announcement = GetTime(); |
4418 | 0 | } |
4419 | | |
4420 | 253 | if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here Branch (4420:13): [True: 0, False: 253]
|
4421 | 0 | return; |
4422 | | |
4423 | 253 | auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash()); |
4424 | 253 | size_t already_in_flight = std::distance(range_flight.first, range_flight.second); |
4425 | 253 | bool requested_block_from_this_peer{false}; |
4426 | | |
4427 | | // Multimap ensures ordering of outstanding requests. It's either empty or first in line. |
4428 | 253 | bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId()); Branch (4428:32): [True: 253, False: 0]
Branch (4428:58): [True: 0, False: 0]
|
4429 | | |
4430 | 253 | while (range_flight.first != range_flight.second) { Branch (4430:16): [True: 0, False: 253]
|
4431 | 0 | if (range_flight.first->second.first == pfrom.GetId()) { Branch (4431:17): [True: 0, False: 0]
|
4432 | 0 | requested_block_from_this_peer = true; |
4433 | 0 | break; |
4434 | 0 | } |
4435 | 0 | range_flight.first++; |
4436 | 0 | } |
4437 | | |
4438 | 253 | if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better Branch (4438:13): [True: 253, False: 0]
|
4439 | 253 | pindex->nTx != 0) { // We had this block at some point, but pruned it Branch (4439:17): [True: 0, False: 0]
|
4440 | 0 | if (requested_block_from_this_peer) { Branch (4440:17): [True: 0, False: 0]
|
4441 | | // We requested this block for some reason, but our mempool will probably be useless, |
4442 | | // so we just grab the block via normal getdata |
4443 | 0 | std::vector<CInv> vInv(1); |
4444 | 0 | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); |
4445 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4446 | 0 | } |
4447 | 0 | return; |
4448 | 0 | } |
4449 | | |
4450 | | // If we're not close to tip yet, give up and let parallel block fetch work its magic |
4451 | 253 | if (!already_in_flight && !CanDirectFetch()) { Branch (4451:13): [True: 0, False: 253]
Branch (4451:35): [True: 0, False: 0]
|
4452 | 0 | return; |
4453 | 0 | } |
4454 | | |
4455 | | // We want to be a bit conservative just to be extra careful about DoS |
4456 | | // possibilities in compact block processing... |
4457 | 253 | if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) { Branch (4457:13): [True: 0, False: 253]
|
4458 | 0 | if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || Branch (4458:18): [True: 0, False: 0]
Branch (4458:76): [True: 0, False: 0]
|
4459 | 0 | requested_block_from_this_peer) { Branch (4459:18): [True: 0, False: 0]
|
4460 | 0 | std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr; |
4461 | 0 | if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) { Branch (4461:21): [True: 0, False: 0]
|
4462 | 0 | if (!(*queuedBlockIt)->partialBlock) Branch (4462:25): [True: 0, False: 0]
|
4463 | 0 | (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); |
4464 | 0 | else { |
4465 | | // The block was already in flight using compact blocks from the same peer |
4466 | 0 | LogDebug(BCLog::NET, "Peer sent us compact block we were already syncing!\n"); |
4467 | 0 | return; |
4468 | 0 | } |
4469 | 0 | } |
4470 | | |
4471 | 0 | PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; |
4472 | 0 | ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); |
4473 | 0 | if (status == READ_STATUS_INVALID) { Branch (4473:21): [True: 0, False: 0]
|
4474 | 0 | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect |
4475 | 0 | Misbehaving(*peer, "invalid compact block"); |
4476 | 0 | return; |
4477 | 0 | } else if (status == READ_STATUS_FAILED) { Branch (4477:28): [True: 0, False: 0]
|
4478 | 0 | if (first_in_flight) { Branch (4478:25): [True: 0, False: 0]
|
4479 | | // Duplicate txindexes, the block is now in-flight, so just request it |
4480 | 0 | std::vector<CInv> vInv(1); |
4481 | 0 | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); |
4482 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4483 | 0 | } else { |
4484 | | // Give up for this peer and wait for other peer(s) |
4485 | 0 | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); |
4486 | 0 | } |
4487 | 0 | return; |
4488 | 0 | } |
4489 | | |
4490 | 0 | BlockTransactionsRequest req; |
4491 | 0 | for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { Branch (4491:36): [True: 0, False: 0]
|
4492 | 0 | if (!partialBlock.IsTxAvailable(i)) Branch (4492:25): [True: 0, False: 0]
|
4493 | 0 | req.indexes.push_back(i); |
4494 | 0 | } |
4495 | 0 | if (req.indexes.empty()) { Branch (4495:21): [True: 0, False: 0]
|
4496 | 0 | fProcessBLOCKTXN = true; |
4497 | 0 | } else if (first_in_flight) { Branch (4497:28): [True: 0, False: 0]
|
4498 | | // We will try to round-trip any compact blocks we get on failure, |
4499 | | // as long as it's first... |
4500 | 0 | req.blockhash = pindex->GetBlockHash(); |
4501 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req); |
4502 | 0 | } else if (pfrom.m_bip152_highbandwidth_to && Branch (4502:28): [True: 0, False: 0]
|
4503 | 0 | (!pfrom.IsInboundConn() || Branch (4503:22): [True: 0, False: 0]
|
4504 | 0 | IsBlockRequestedFromOutbound(blockhash) || Branch (4504:21): [True: 0, False: 0]
|
4505 | 0 | already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) { Branch (4505:21): [True: 0, False: 0]
|
4506 | | // ... or it's a hb relay peer and: |
4507 | | // - peer is outbound, or |
4508 | | // - we already have an outbound attempt in flight (so we'll take what we can get), or |
4509 | | // - it's not the final parallel download slot (which we may reserve for first outbound) |
4510 | 0 | req.blockhash = pindex->GetBlockHash(); |
4511 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req); |
4512 | 0 | } else { |
4513 | | // Give up for this peer and wait for other peer(s) |
4514 | 0 | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); |
4515 | 0 | } |
4516 | 0 | } else { |
4517 | | // This block is either already in flight from a different |
4518 | | // peer, or this peer has too many blocks outstanding for |
4519 | | // us to download more from it. |
4520 | | // Optimistically try to reconstruct anyway since we might be |
4521 | | // able to without any round trips. |
4522 | 0 | PartiallyDownloadedBlock tempBlock(&m_mempool); |
4523 | 0 | ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact); |
4524 | 0 | if (status != READ_STATUS_OK) { Branch (4524:21): [True: 0, False: 0]
|
4525 | | // TODO: don't ignore failures |
4526 | 0 | return; |
4527 | 0 | } |
4528 | 0 | std::vector<CTransactionRef> dummy; |
4529 | 0 | status = tempBlock.FillBlock(*pblock, dummy); |
4530 | 0 | if (status == READ_STATUS_OK) { Branch (4530:21): [True: 0, False: 0]
|
4531 | 0 | fBlockReconstructed = true; |
4532 | 0 | } |
4533 | 0 | } |
4534 | 253 | } else { |
4535 | 253 | if (requested_block_from_this_peer) { Branch (4535:17): [True: 0, False: 253]
|
4536 | | // We requested this block, but it's far into the future, so our |
4537 | | // mempool will probably be useless - request the block normally |
4538 | 0 | std::vector<CInv> vInv(1); |
4539 | 0 | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); |
4540 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4541 | 0 | return; |
4542 | 253 | } else { |
4543 | | // If this was an announce-cmpctblock, we want the same treatment as a header message |
4544 | 253 | fRevertToHeaderProcessing = true; |
4545 | 253 | } |
4546 | 253 | } |
4547 | 253 | } // cs_main |
4548 | | |
4549 | 253 | if (fProcessBLOCKTXN) { Branch (4549:13): [True: 0, False: 253]
|
4550 | 0 | BlockTransactions txn; |
4551 | 0 | txn.blockhash = blockhash; |
4552 | 0 | return ProcessCompactBlockTxns(pfrom, *peer, txn); |
4553 | 0 | } |
4554 | | |
4555 | 253 | if (fRevertToHeaderProcessing) { Branch (4555:13): [True: 0, False: 253]
|
4556 | | // Headers received from HB compact block peers are permitted to be |
4557 | | // relayed before full validation (see BIP 152), so we don't want to disconnect |
4558 | | // the peer if the header turns out to be for an invalid block. |
4559 | | // Note that if a peer tries to build on an invalid chain, that |
4560 | | // will be detected and the peer will be disconnected/discouraged. |
4561 | 0 | return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true); |
4562 | 0 | } |
4563 | | |
4564 | 253 | if (fBlockReconstructed) { Branch (4564:13): [True: 0, False: 253]
|
4565 | | // If we got here, we were able to optimistically reconstruct a |
4566 | | // block that is in flight from some other peer. |
4567 | 0 | { |
4568 | 0 | LOCK(cs_main); |
4569 | 0 | mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false)); |
4570 | 0 | } |
4571 | | // Setting force_processing to true means that we bypass some of |
4572 | | // our anti-DoS protections in AcceptBlock, which filters |
4573 | | // unrequested blocks that might be trying to waste our resources |
4574 | | // (e.g. disk space). Because we only try to reconstruct blocks when |
4575 | | // we're close to caught up (via the CanDirectFetch() requirement |
4576 | | // above, combined with the behavior of not requesting blocks until |
4577 | | // we have a chain with at least the minimum chain work), and we ignore |
4578 | | // compact blocks with less work than our tip, it is safe to treat |
4579 | | // reconstructed compact blocks as having been requested. |
4580 | 0 | ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); |
4581 | 0 | LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid() |
4582 | 0 | if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) { Branch (4582:17): [True: 0, False: 0]
|
4583 | | // Clear download state for this block, which is in |
4584 | | // process from some other peer. We do this after calling |
4585 | | // ProcessNewBlock so that a malleated cmpctblock announcement |
4586 | | // can't be used to interfere with block relay. |
4587 | 0 | RemoveBlockRequest(pblock->GetHash(), std::nullopt); |
4588 | 0 | } |
4589 | 0 | } |
4590 | 253 | return; |
4591 | 253 | } |
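
Most of this handler is deciding whether a round trip is worth it; the request construction itself reduces to listing the indexes of transactions that InitData could not fill from the mempool or extra pool. A sketch that mirrors the loop above:

    #include <cstddef>
    #include <vector>

    // An empty result means the block reconstructs immediately (the
    // fProcessBLOCKTXN path); otherwise the indexes are sent in getblocktxn.
    std::vector<size_t> MissingTxIndexes(const std::vector<bool>& available)
    {
        std::vector<size_t> missing;
        for (size_t i = 0; i < available.size(); ++i) {
            if (!available[i]) missing.push_back(i);
        }
        return missing;
    }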
4592 | | |
4593 | 4.29M | if (msg_type == NetMsgType::BLOCKTXN) Branch (4593:9): [True: 357, False: 4.28M]
|
4594 | 357 | { |
4595 | | // Ignore blocktxn received while importing |
4596 | 357 | if (m_chainman.m_blockman.LoadingBlocks()) { Branch (4596:13): [True: 0, False: 357]
|
4597 | 0 | LogDebug(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId()); |
4598 | 0 | return; |
4599 | 0 | } |
4600 | | |
4601 | 357 | BlockTransactions resp; |
4602 | 357 | vRecv >> resp; |
4603 | | |
4604 | 357 | return ProcessCompactBlockTxns(pfrom, *peer, resp); |
4605 | 357 | } |
4606 | | |
4607 | 4.28M | if (msg_type == NetMsgType::HEADERS) Branch (4607:9): [True: 21.6k, False: 4.26M]
|
4608 | 21.6k | { |
4609 | | // Ignore headers received while importing |
4610 | 21.6k | if (m_chainman.m_blockman.LoadingBlocks()) { Branch (4610:13): [True: 0, False: 21.6k]
|
4611 | 0 | LogDebug(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId()); |
4612 | 0 | return; |
4613 | 0 | } |
4614 | | |
4615 | 21.6k | std::vector<CBlockHeader> headers; |
4616 | | |
4617 | | // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. |
4618 | 21.6k | unsigned int nCount = ReadCompactSize(vRecv); |
4619 | 21.6k | if (nCount > m_opts.max_headers_result) { Branch (4619:13): [True: 9, False: 21.6k]
|
4620 | 9 | Misbehaving(*peer, strprintf("headers message size = %u", nCount)); |
4621 | 9 | return; |
4622 | 9 | } |
4623 | 21.6k | headers.resize(nCount); |
4624 | 43.2k | for (unsigned int n = 0; n < nCount; n++) { Branch (4624:34): [True: 21.6k, False: 21.6k]
|
4625 | 21.6k | vRecv >> headers[n]; |
4626 | 21.6k | ReadCompactSize(vRecv); // ignore tx count; assume it is 0. |
4627 | 21.6k | } |
4628 | | |
4629 | 21.6k | ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false); |
4630 | | |
4631 | | // Check if the headers presync progress needs to be reported to validation. |
4632 | | // This needs to be done without holding the m_headers_presync_mutex lock. |
4633 | 21.6k | if (m_headers_presync_should_signal.exchange(false)) { Branch (4633:13): [True: 0, False: 21.6k]
|
4634 | 0 | HeadersPresyncStats stats; |
4635 | 0 | { |
4636 | 0 | LOCK(m_headers_presync_mutex); |
4637 | 0 | auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer); |
4638 | 0 | if (it != m_headers_presync_stats.end()) stats = it->second; Branch (4638:21): [True: 0, False: 0]
|
4639 | 0 | } |
4640 | 0 | if (stats.second) { Branch (4640:17): [True: 0, False: 0]
|
4641 | 0 | m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second); |
4642 | 0 | } |
4643 | 0 | } |
4644 | | |
4645 | 21.6k | return; |
4646 | 21.6k | } |
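
The manual deserialization above leans on the wire shape of a headers message: a compact-size count, then for each entry an 80-byte block header followed by a compact-size transaction count that is always zero and is read and discarded. A standalone sketch of a reader for that layout (it only decodes one-byte compact sizes, which is enough to illustrate the framing):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    struct Reader {
        const uint8_t* p;
        std::size_t n;
        uint8_t Byte()
        {
            if (n == 0) throw std::runtime_error("unexpected end of message");
            --n;
            return *p++;
        }
        uint64_t CompactSize()
        {
            const uint8_t b{Byte()};
            if (b >= 0xfd) throw std::runtime_error("multi-byte sizes not handled in sketch");
            return b;
        }
    };

    std::vector<std::array<uint8_t, 80>> ParseHeaders(Reader r)
    {
        std::vector<std::array<uint8_t, 80>> out(r.CompactSize());
        for (auto& header : out) {
            for (auto& b : header) b = r.Byte(); // raw 80-byte header
            (void)r.CompactSize();               // tx count; assumed 0 as above
        }
        return out;
    }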
4647 | | |
4648 | 4.26M | if (msg_type == NetMsgType::BLOCK) Branch (4648:9): [True: 2.24M, False: 2.01M]
|
4649 | 2.24M | { |
4650 | | // Ignore block received while importing |
4651 | 2.24M | if (m_chainman.m_blockman.LoadingBlocks()) { Branch (4651:13): [True: 0, False: 2.24M]
|
4652 | 0 | LogDebug(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId()); |
4653 | 0 | return; |
4654 | 0 | } |
4655 | | |
4656 | 2.24M | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
4657 | 2.24M | vRecv >> TX_WITH_WITNESS(*pblock); |
4658 | | |
4659 | 2.24M | LogDebug(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId()); |
4660 | | |
4661 | 2.24M | const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))}; |
4662 | | |
4663 | | // Check for possible mutation if it connects to something we know, so we can check for DEPLOYMENT_SEGWIT being active |
4664 | 2.24M | if (prev_block && IsBlockMutated(/*block=*/*pblock, Branch (4664:13): [True: 2.24M, False: 4.53k]
Branch (4664:27): [True: 158, False: 2.24M]
|
4665 | 2.24M | /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) { |
4666 | 158 | LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id); |
4667 | 158 | Misbehaving(*peer, "mutated block"); |
4668 | 158 | WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id)); |
4669 | 158 | return; |
4670 | 158 | } |
4671 | | |
4672 | 2.24M | bool forceProcessing = false; |
4673 | 2.24M | const uint256 hash(pblock->GetHash()); |
4674 | 2.24M | bool min_pow_checked = false; |
4675 | 2.24M | { |
4676 | 2.24M | LOCK(cs_main); |
4677 | | // Always process the block if we requested it, since we may |
4678 | | // need it even when it's not a candidate for a new best tip. |
4679 | 2.24M | forceProcessing = IsBlockRequested(hash); |
4680 | 2.24M | RemoveBlockRequest(hash, pfrom.GetId()); |
4681 | | // mapBlockSource is only used for punishing peers and setting |
4682 | | // which peers send us compact blocks, so the race between here and |
4683 | | // cs_main in ProcessNewBlock is fine. |
4684 | 2.24M | mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true)); |
4685 | | |
4686 | | // Check claimed work on this block against our anti-dos thresholds. |
4687 | 2.24M | if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({{pblock->GetBlockHeader()}}) >= GetAntiDoSWorkThreshold()) { Branch (4687:17): [True: 2.24M, False: 4.53k]
Branch (4687:17): [True: 2.24M, False: 4.53k]
Branch (4687:31): [True: 2.24M, False: 0]
|
4688 | 2.24M | min_pow_checked = true; |
4689 | 2.24M | } |
4690 | 2.24M | } |
4691 | 2.24M | ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked); |
4692 | 2.24M | return; |
4693 | 2.24M | } |
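
A numeric sketch of the claimed-work gate near the end of this handler, with plain integers standing in for 256-bit chain work: the block only counts as min-PoW-checked when its parent's cumulative work plus the work claimed by its own header reaches the anti-DoS threshold:

    #include <cstdint>

    bool MinPowChecked(uint64_t prev_chain_work,
                       uint64_t claimed_header_work,
                       uint64_t anti_dos_threshold)
    {
        return prev_chain_work + claimed_header_work >= anti_dos_threshold;
    }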
4694 | | |
4695 | 2.01M | if (msg_type == NetMsgType::GETADDR) { Branch (4695:9): [True: 524, False: 2.01M]
|
4696 | | // This asymmetric behavior for inbound and outbound connections was introduced |
4697 | | // to prevent a fingerprinting attack: an attacker can send specific fake addresses |
4698 | | // to users' AddrMan and later request them by sending getaddr messages. |
4699 | | // Making nodes which are behind NAT and can only make outgoing connections ignore |
4700 | | // the getaddr message mitigates the attack. |
4701 | 524 | if (!pfrom.IsInboundConn()) { Branch (4701:13): [True: 69, False: 455]
|
4702 | 69 | LogDebug(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId()); |
4703 | 69 | return; |
4704 | 69 | } |
4705 | | |
4706 | | // Since this must be an inbound connection, SetupAddressRelay will |
4707 | | // never fail. |
4708 | 455 | Assume(SetupAddressRelay(pfrom, *peer)); |
4709 | | |
4710 | | // Only send one GetAddr response per connection to reduce resource waste |
4711 | | // and discourage addr stamping of INV announcements. |
4712 | 455 | if (peer->m_getaddr_recvd) { Branch (4712:13): [True: 48, False: 407]
|
4713 | 48 | LogDebug(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId()); |
4714 | 48 | return; |
4715 | 48 | } |
4716 | 407 | peer->m_getaddr_recvd = true; |
4717 | | |
4718 | 407 | peer->m_addrs_to_send.clear(); |
4719 | 407 | std::vector<CAddress> vAddr; |
4720 | 407 | if (pfrom.HasPermission(NetPermissionFlags::Addr)) { Branch (4720:13): [True: 0, False: 407]
|
4721 | 0 | vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt); |
4722 | 407 | } else { |
4723 | 407 | vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); |
4724 | 407 | } |
4725 | 414 | for (const CAddress &addr : vAddr) { Branch (4725:35): [True: 414, False: 407]
|
4726 | 414 | PushAddress(*peer, addr); |
4727 | 414 | } |
4728 | 407 | return; |
4729 | 455 | } |
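Two rules shape the handler above: outbound connections are ignored entirely (the fingerprinting mitigation described in the comment), and an inbound peer is served at most once per connection. A minimal sketch of that gating, with a hypothetical PeerState in place of Peer:

    #include <iostream>

    // Hypothetical condensation of the getaddr gating above.
    struct PeerState {
        bool inbound{true};
        bool getaddr_recvd{false};
    };

    bool ShouldServeGetAddr(PeerState& peer)
    {
        if (!peer.inbound) return false;      // mitigates addr fingerprinting
        if (peer.getaddr_recvd) return false; // one response per connection
        peer.getaddr_recvd = true;
        return true;
    }

    int main()
    {
        PeerState p;
        std::cout << ShouldServeGetAddr(p) << ShouldServeGetAddr(p) << '\n'; // prints 10
    }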
4730 | | |
4731 | 2.01M | if (msg_type == NetMsgType::MEMPOOL) { Branch (4731:9): [True: 239, False: 2.01M]
|
4732 | | // Only process received mempool messages if we advertise NODE_BLOOM |
4733 | | // or if the peer has mempool permissions. |
4734 | 239 | if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) Branch (4734:13): [True: 0, False: 239]
Branch (4734:53): [True: 0, False: 0]
|
4735 | 0 | { |
4736 | 0 | if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) Branch (4736:17): [True: 0, False: 0]
|
4737 | 0 | { |
4738 | 0 | LogDebug(BCLog::NET, "mempool request with bloom filters disabled, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
4739 | 0 | pfrom.fDisconnect = true; |
4740 | 0 | } |
4741 | 0 | return; |
4742 | 0 | } |
4743 | | |
4744 | 239 | if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) Branch (4744:13): [True: 0, False: 239]
Branch (4744:55): [True: 0, False: 0]
|
4745 | 0 | { |
4746 | 0 | if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) Branch (4746:17): [True: 0, False: 0]
|
4747 | 0 | { |
4748 | 0 | LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
4749 | 0 | pfrom.fDisconnect = true; |
4750 | 0 | } |
4751 | 0 | return; |
4752 | 0 | } |
4753 | | |
4754 | 239 | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { Branch (4754:49): [True: 239, False: 0]
|
4755 | 239 | LOCK(tx_relay->m_tx_inventory_mutex); |
4756 | 239 | tx_relay->m_send_mempool = true; |
4757 | 239 | } |
4758 | 239 | return; |
4759 | 239 | } |
4760 | | |
4761 | 2.01M | if (msg_type == NetMsgType::PING) { Branch (4761:9): [True: 2.01M, False: 4.97k]
|
4762 | 2.01M | if (pfrom.GetCommonVersion() > BIP0031_VERSION) { Branch (4762:13): [True: 2.01M, False: 0]
|
4763 | 2.01M | uint64_t nonce = 0; |
4764 | 2.01M | vRecv >> nonce; |
4765 | | // Echo the message back with the nonce. This allows for two useful features: |
4766 | | // |
4767 | | // 1) A remote node can quickly check if the connection is operational |
4768 | | // 2) Remote nodes can measure the latency of the network thread. If this node |
4769 | | // is overloaded it won't respond to pings quickly and the remote node can |
4770 | | // avoid sending us more work, like chain download requests. |
4771 | | // |
4772 | | // The nonce stops the remote getting confused between different pings: without |
4773 | | // it, if the remote node sends a ping once per second and this node takes 5 |
4774 | | // seconds to respond to each, the 5th ping the remote sends would appear to |
4775 | | // return very quickly. |
4776 | 2.01M | MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce); |
4777 | 2.01M | } |
4778 | 2.01M | return; |
4779 | 2.01M | } |
4780 | | |
4781 | 4.97k | if (msg_type == NetMsgType::PONG) { Branch (4781:9): [True: 366, False: 4.61k]
|
4782 | 366 | const auto ping_end = time_received; |
4783 | 366 | uint64_t nonce = 0; |
4784 | 366 | size_t nAvail = vRecv.in_avail(); |
4785 | 366 | bool bPingFinished = false; |
4786 | 366 | std::string sProblem; |
4787 | | |
4788 | 366 | if (nAvail >= sizeof(nonce)) { Branch (4788:13): [True: 303, False: 63]
|
4789 | 303 | vRecv >> nonce; |
4790 | | |
4791 | | // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) |
4792 | 303 | if (peer->m_ping_nonce_sent != 0) { Branch (4792:17): [True: 295, False: 8]
|
4793 | 295 | if (nonce == peer->m_ping_nonce_sent) { Branch (4793:21): [True: 0, False: 295]
|
4794 | | // Matching pong received, this ping is no longer outstanding |
4795 | 0 | bPingFinished = true; |
4796 | 0 | const auto ping_time = ping_end - peer->m_ping_start.load(); |
4797 | 0 | if (ping_time.count() >= 0) { Branch (4797:25): [True: 0, False: 0]
|
4798 | | // Let connman know about this successful ping-pong |
4799 | 0 | pfrom.PongReceived(ping_time); |
4800 | 0 | } else { |
4801 | | // This should never happen |
4802 | 0 | sProblem = "Timing mishap"; |
4803 | 0 | } |
4804 | 295 | } else { |
4805 | | // Nonce mismatches are normal when pings are overlapping |
4806 | 295 | sProblem = "Nonce mismatch"; |
4807 | 295 | if (nonce == 0) { Branch (4807:25): [True: 8, False: 287]
|
4808 | | // This is most likely a bug in another implementation somewhere; cancel this ping |
4809 | 8 | bPingFinished = true; |
4810 | 8 | sProblem = "Nonce zero"; |
4811 | 8 | } |
4812 | 295 | } |
4813 | 295 | } else { |
4814 | 8 | sProblem = "Unsolicited pong without ping"; |
4815 | 8 | } |
4816 | 303 | } else { |
4817 | | // This is most likely a bug in another implementation somewhere; cancel this ping |
4818 | 63 | bPingFinished = true; |
4819 | 63 | sProblem = "Short payload"; |
4820 | 63 | } |
4821 | | |
4822 | 366 | if (!(sProblem.empty())) { Branch (4822:13): [True: 366, False: 0]
|
4823 | 366 | LogDebug(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", |
4824 | 366 | pfrom.GetId(), |
4825 | 366 | sProblem, |
4826 | 366 | peer->m_ping_nonce_sent, |
4827 | 366 | nonce, |
4828 | 366 | nAvail); |
4829 | 366 | } |
4830 | 366 | if (bPingFinished) { Branch (4830:13): [True: 71, False: 295]
|
4831 | 71 | peer->m_ping_nonce_sent = 0; |
4832 | 71 | } |
4833 | 366 | return; |
4834 | 366 | } |
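The nonce set when a ping goes out (see MaybeSendPing below) is what lets this handler attribute a pong to one specific outstanding ping and turn the timestamp difference into a round-trip time; a mismatched nonce from overlapping pings is logged but leaves the ping outstanding. A toy model of that bookkeeping, with illustrative names:

    #include <chrono>
    #include <cstdint>
    #include <optional>

    using std::chrono::microseconds;
    using std::chrono::steady_clock;

    // Toy model of the nonce bookkeeping above; names are illustrative.
    struct PingState {
        uint64_t nonce_sent{0}; // 0 means no ping is outstanding
        steady_clock::time_point ping_start;
    };

    // Returns the round-trip time if the pong matches the outstanding ping;
    // unsolicited or overlapping pongs yield nothing and leave the ping pending.
    std::optional<microseconds> OnPong(PingState& s, uint64_t nonce, steady_clock::time_point now)
    {
        if (s.nonce_sent == 0 || nonce != s.nonce_sent) return std::nullopt;
        s.nonce_sent = 0; // matching pong: the ping is no longer outstanding
        return std::chrono::duration_cast<microseconds>(now - s.ping_start);
    }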
4835 | | |
4836 | 4.61k | if (msg_type == NetMsgType::FILTERLOAD) { Branch (4836:9): [True: 511, False: 4.10k]
|
4837 | 511 | if (!(peer->m_our_services & NODE_BLOOM)) { Branch (4837:13): [True: 0, False: 511]
|
4838 | 0 | LogDebug(BCLog::NET, "filterload received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
4839 | 0 | pfrom.fDisconnect = true; |
4840 | 0 | return; |
4841 | 0 | } |
4842 | 511 | CBloomFilter filter; |
4843 | 511 | vRecv >> filter; |
4844 | | |
4845 | 511 | if (!filter.IsWithinSizeConstraints()) Branch (4845:13): [True: 34, False: 477]
|
4846 | 34 | { |
4847 | | // There is no excuse for sending a too-large filter |
4848 | 34 | Misbehaving(*peer, "too-large bloom filter"); |
4849 | 477 | } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { Branch (4849:56): [True: 269, False: 208]
|
4850 | 269 | { |
4851 | 269 | LOCK(tx_relay->m_bloom_filter_mutex); |
4852 | 269 | tx_relay->m_bloom_filter.reset(new CBloomFilter(filter)); |
4853 | 269 | tx_relay->m_relay_txs = true; |
4854 | 269 | } |
4855 | 269 | pfrom.m_bloom_filter_loaded = true; |
4856 | 269 | pfrom.m_relays_txs = true; |
4857 | 269 | } |
4858 | 511 | return; |
4859 | 511 | } |
4860 | | |
4861 | 4.10k | if (msg_type == NetMsgType::FILTERADD) { Branch (4861:9): [True: 340, False: 3.76k]
|
4862 | 340 | if (!(peer->m_our_services & NODE_BLOOM)) { Branch (4862:13): [True: 0, False: 340]
|
4863 | 0 | LogDebug(BCLog::NET, "filteradd received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
4864 | 0 | pfrom.fDisconnect = true; |
4865 | 0 | return; |
4866 | 0 | } |
4867 | 340 | std::vector<unsigned char> vData; |
4868 | 340 | vRecv >> vData; |
4869 | | |
4870 | | // Nodes must NEVER send a data item > MAX_SCRIPT_ELEMENT_SIZE bytes (the max size for a script data object, |
4871 | | // and thus, the maximum size any matched object can have) in a filteradd message |
4872 | 340 | bool bad = false; |
4873 | 340 | if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { Branch (4873:13): [True: 0, False: 340]
|
4874 | 0 | bad = true; |
4875 | 340 | } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { Branch (4875:56): [True: 143, False: 197]
|
4876 | 143 | LOCK(tx_relay->m_bloom_filter_mutex); |
4877 | 143 | if (tx_relay->m_bloom_filter) { Branch (4877:17): [True: 90, False: 53]
|
4878 | 90 | tx_relay->m_bloom_filter->insert(vData); |
4879 | 90 | } else { |
4880 | 53 | bad = true; |
4881 | 53 | } |
4882 | 143 | } |
4883 | 340 | if (bad) { Branch (4883:13): [True: 53, False: 287]
|
4884 | 53 | Misbehaving(*peer, "bad filteradd message"); |
4885 | 53 | } |
4886 | 340 | return; |
4887 | 340 | } |
4888 | | |
4889 | 3.76k | if (msg_type == NetMsgType::FILTERCLEAR) { Branch (4889:9): [True: 151, False: 3.60k]
|
4890 | 151 | if (!(peer->m_our_services & NODE_BLOOM)) { Branch (4890:13): [True: 0, False: 151]
|
4891 | 0 | LogDebug(BCLog::NET, "filterclear received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs)); |
4892 | 0 | pfrom.fDisconnect = true; |
4893 | 0 | return; |
4894 | 0 | } |
4895 | 151 | auto tx_relay = peer->GetTxRelay(); |
4896 | 151 | if (!tx_relay) return; Branch (4896:13): [True: 0, False: 151]
|
4897 | | |
4898 | 151 | { |
4899 | 151 | LOCK(tx_relay->m_bloom_filter_mutex); |
4900 | 151 | tx_relay->m_bloom_filter = nullptr; |
4901 | 151 | tx_relay->m_relay_txs = true; |
4902 | 151 | } |
4903 | 151 | pfrom.m_bloom_filter_loaded = false; |
4904 | 151 | pfrom.m_relays_txs = true; |
4905 | 151 | return; |
4906 | 151 | } |
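Taken together, the three handlers above implement the BIP 37 filter lifecycle: filterload installs a filter and turns transaction relay on, filteradd inserts a raw data element (misbehavior if no filter is loaded), and filterclear removes the filter. A compact sketch, using std::set as a stand-in for the probabilistic CBloomFilter:

    #include <memory>
    #include <set>
    #include <vector>

    // std::set stands in for the probabilistic CBloomFilter.
    using FilterSketch = std::set<std::vector<unsigned char>>;

    struct TxRelaySketch {
        std::unique_ptr<FilterSketch> bloom_filter; // null => no filter loaded
        bool relay_txs{false};
    };

    void FilterLoad(TxRelaySketch& r, FilterSketch f)
    {
        r.bloom_filter = std::make_unique<FilterSketch>(std::move(f));
        r.relay_txs = true;
    }

    // Returns false (misbehavior in the handler above) if no filter is loaded.
    bool FilterAdd(TxRelaySketch& r, std::vector<unsigned char> data)
    {
        if (!r.bloom_filter) return false;
        r.bloom_filter->insert(std::move(data));
        return true;
    }

    void FilterClear(TxRelaySketch& r)
    {
        r.bloom_filter.reset();
        r.relay_txs = true;
    }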
4907 | | |
4908 | 3.60k | if (msg_type == NetMsgType::FEEFILTER) { Branch (4908:9): [True: 231, False: 3.37k]
|
4909 | 231 | CAmount newFeeFilter = 0; |
4910 | 231 | vRecv >> newFeeFilter; |
4911 | 231 | if (MoneyRange(newFeeFilter)) { Branch (4911:13): [True: 25, False: 206]
|
4912 | 25 | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { Branch (4912:53): [True: 25, False: 0]
|
4913 | 25 | tx_relay->m_fee_filter_received = newFeeFilter; |
4914 | 25 | } |
4915 | 25 | LogDebug(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId()); |
4916 | 25 | } |
4917 | 231 | return; |
4918 | 231 | } |
4919 | | |
4920 | 3.37k | if (msg_type == NetMsgType::GETCFILTERS) { Branch (4920:9): [True: 327, False: 3.05k]
|
4921 | 327 | ProcessGetCFilters(pfrom, *peer, vRecv); |
4922 | 327 | return; |
4923 | 327 | } |
4924 | | |
4925 | 3.05k | if (msg_type == NetMsgType::GETCFHEADERS) { Branch (4925:9): [True: 165, False: 2.88k]
|
4926 | 165 | ProcessGetCFHeaders(pfrom, *peer, vRecv); |
4927 | 165 | return; |
4928 | 165 | } |
4929 | | |
4930 | 2.88k | if (msg_type == NetMsgType::GETCFCHECKPT) { Branch (4930:9): [True: 117, False: 2.76k]
|
4931 | 117 | ProcessGetCFCheckPt(pfrom, *peer, vRecv); |
4932 | 117 | return; |
4933 | 117 | } |
4934 | | |
4935 | 2.76k | if (msg_type == NetMsgType::NOTFOUND) { Branch (4935:9): [True: 296, False: 2.47k]
|
4936 | 296 | std::vector<CInv> vInv; |
4937 | 296 | vRecv >> vInv; |
4938 | 296 | std::vector<uint256> tx_invs; |
4939 | 296 | if (vInv.size() <= node::MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { Branch (4939:13): [True: 79, False: 217]
|
4940 | 106 | for (CInv &inv : vInv) { Branch (4940:28): [True: 106, False: 79]
|
4941 | 106 | if (inv.IsGenTxMsg()) { Branch (4941:21): [True: 32, False: 74]
|
4942 | 32 | tx_invs.emplace_back(inv.hash); |
4943 | 32 | } |
4944 | 106 | } |
4945 | 79 | } |
4946 | 296 | LOCK(m_tx_download_mutex); |
4947 | 296 | m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs); |
4948 | 296 | return; |
4949 | 296 | } |
4950 | | |
4951 | | // Ignore unknown commands for extensibility |
4952 | 2.47k | LogDebug(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); |
4953 | 2.47k | return; |
4954 | 2.76k | } |
4955 | | |
4956 | | bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) |
4957 | 42.4M | { |
4958 | 42.4M | { |
4959 | 42.4M | LOCK(peer.m_misbehavior_mutex); |
4960 | | |
4961 | | // There's nothing to do if the m_should_discourage flag isn't set |
4962 | 42.4M | if (!peer.m_should_discourage) return false; Branch (4962:13): [True: 42.4M, False: 5.65k]
|
4963 | | |
4964 | 5.65k | peer.m_should_discourage = false; |
4965 | 5.65k | } // peer.m_misbehavior_mutex |
4966 | | |
4967 | 5.65k | if (pnode.HasPermission(NetPermissionFlags::NoBan)) { Branch (4967:9): [True: 0, False: 5.65k]
|
4968 | | // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission |
4969 | 0 | LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id); |
4970 | 0 | return false; |
4971 | 0 | } |
4972 | | |
4973 | 5.65k | if (pnode.IsManualConn()) { Branch (4973:9): [True: 0, False: 5.65k]
|
4974 | | // We never disconnect or discourage manual peers for bad behavior |
4975 | 0 | LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id); |
4976 | 0 | return false; |
4977 | 0 | } |
4978 | | |
4979 | 5.65k | if (pnode.addr.IsLocal()) { Branch (4979:9): [True: 5.65k, False: 0]
|
4980 | | // We disconnect local peers for bad behavior but don't discourage (since that would discourage |
4981 | | // all peers on the same local address) |
4982 | 5.65k | LogDebug(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n", |
4983 | 5.65k | pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id); |
4984 | 5.65k | pnode.fDisconnect = true; |
4985 | 5.65k | return true; |
4986 | 5.65k | } |
4987 | | |
4988 | | // Normal case: Disconnect the peer and discourage all nodes sharing the address |
4989 | 0 | LogDebug(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id); |
4990 | 0 | if (m_banman) m_banman->Discourage(pnode.addr); Branch (4990:9): [True: 0, False: 0]
|
4991 | 0 | m_connman.DisconnectNode(pnode.addr); |
4992 | 0 | return true; |
4993 | 5.65k | } |
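The order of the checks above encodes the policy: noban and manual peers are never punished, local peers are disconnected but not discouraged (discouragement is per-address, so it would hit every peer behind the same local address), and everyone else is both discouraged and disconnected. A hypothetical condensation:

    enum class Punishment { None, DisconnectOnly, DiscourageAndDisconnect };

    // Hypothetical condensation of the checks above, in the same order.
    Punishment Decide(bool noban, bool manual, bool is_local)
    {
        if (noban || manual) return Punishment::None;    // exempt peers
        if (is_local) return Punishment::DisconnectOnly; // don't punish all local peers
        return Punishment::DiscourageAndDisconnect;      // normal case
    }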
4994 | | |
4995 | | bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc) |
4996 | 42.4M | { |
4997 | 42.4M | AssertLockNotHeld(m_tx_download_mutex); |
4998 | 42.4M | AssertLockHeld(g_msgproc_mutex); |
4999 | | |
5000 | 42.4M | PeerRef peer = GetPeerRef(pfrom->GetId()); |
5001 | 42.4M | if (peer == nullptr) return false; Branch (5001:9): [True: 0, False: 42.4M]
|
5002 | | |
5003 | | // For outbound connections, ensure that the initial VERSION message |
5004 | | // has been sent first before processing any incoming messages |
5005 | 42.4M | if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false; Branch (5005:9): [True: 21.2M, False: 21.2M]
Branch (5005:36): [True: 44.3k, False: 21.1M]
|
5006 | | |
5007 | 42.4M | { |
5008 | 42.4M | LOCK(peer->m_getdata_requests_mutex); |
5009 | 42.4M | if (!peer->m_getdata_requests.empty()) { Branch (5009:13): [True: 24.3k, False: 42.3M]
|
5010 | 24.3k | ProcessGetData(*pfrom, *peer, interruptMsgProc); |
5011 | 24.3k | } |
5012 | 42.4M | } |
5013 | | |
5014 | 42.4M | const bool processed_orphan = ProcessOrphanTx(*peer); |
5015 | | |
5016 | 42.4M | if (pfrom->fDisconnect) Branch (5016:9): [True: 0, False: 42.4M]
|
5017 | 0 | return false; |
5018 | | |
5019 | 42.4M | if (processed_orphan) return true; Branch (5019:9): [True: 8.54k, False: 42.4M]
|
5020 | | |
5021 | | // This maintains the order of responses
5022 | | // and prevents m_getdata_requests from growing unbounded.
5023 | 42.4M | { |
5024 | 42.4M | LOCK(peer->m_getdata_requests_mutex); |
5025 | 42.4M | if (!peer->m_getdata_requests.empty()) return true; Branch (5025:13): [True: 23.5k, False: 42.3M]
|
5026 | 42.4M | } |
5027 | | |
5028 | | // Don't bother if send buffer is too full to respond anyway |
5029 | 42.3M | if (pfrom->fPauseSend) return false; Branch (5029:9): [True: 0, False: 42.3M]
|
5030 | | |
5031 | 42.3M | auto poll_result{pfrom->PollMessage()}; |
5032 | 42.3M | if (!poll_result) { Branch (5032:9): [True: 36.8M, False: 5.51M]
|
5033 | | // No message to process |
5034 | 36.8M | return false; |
5035 | 36.8M | } |
5036 | | |
5037 | 5.51M | CNetMessage& msg{poll_result->first}; |
5038 | 5.51M | bool fMoreWork = poll_result->second; |
5039 | | |
5040 | 5.51M | TRACEPOINT(net, inbound_message, |
5041 | 5.51M | pfrom->GetId(), |
5042 | 5.51M | pfrom->m_addr_name.c_str(), |
5043 | 5.51M | pfrom->ConnectionTypeAsString().c_str(), |
5044 | 5.51M | msg.m_type.c_str(), |
5045 | 5.51M | msg.m_recv.size(), |
5046 | 5.51M | msg.m_recv.data() |
5047 | 5.51M | ); |
5048 | | |
5049 | 5.51M | if (m_opts.capture_messages) { Branch (5049:9): [True: 0, False: 5.51M]
|
5050 | 0 | CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true); |
5051 | 0 | } |
5052 | | |
5053 | 5.51M | try { |
5054 | 5.51M | ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc); |
5055 | 5.51M | if (interruptMsgProc) return false; Branch (5055:13): [True: 0, False: 5.51M]
|
5056 | 5.51M | { |
5057 | 5.51M | LOCK(peer->m_getdata_requests_mutex); |
5058 | 5.51M | if (!peer->m_getdata_requests.empty()) fMoreWork = true; Branch (5058:17): [True: 828, False: 5.51M]
|
5059 | 5.51M | } |
5060 | | // Does this peer have an orphan ready to reconsider?
5061 | | // (Note: we may have provided a parent for an orphan provided |
5062 | | // by another peer that was already processed; in that case, |
5063 | | // the extra work may not be noticed, possibly resulting in an |
5064 | | // unnecessary 100ms delay) |
5065 | 5.51M | LOCK(m_tx_download_mutex); |
5066 | 5.51M | if (m_txdownloadman.HaveMoreWork(peer->m_id)) fMoreWork = true; Branch (5066:13): [True: 2.62k, False: 5.51M]
|
5067 | 5.51M | } catch (const std::exception& e) { |
5068 | 9.33k | LogDebug(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name()); |
5069 | 9.33k | } catch (...) { |
5070 | 0 | LogDebug(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size); |
5071 | 0 | } |
5072 | | |
5073 | 5.51M | return fMoreWork; |
5074 | 5.51M | } |
5075 | | |
5076 | | void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) |
5077 | 41.8M | { |
5078 | 41.8M | AssertLockHeld(cs_main); |
5079 | | |
5080 | 41.8M | CNodeState &state = *State(pto.GetId()); |
5081 | | |
5082 | 41.8M | if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) { Branch (5082:9): [True: 41.1M, False: 668k]
Branch (5082:42): [True: 20.3M, False: 20.8M]
Branch (5082:78): [True: 20.3M, False: 0]
|
5083 | | // This is an outbound peer subject to disconnection if they don't |
5084 | | // announce a block with as much work as the current tip within |
5085 | | // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if |
5086 | | // their chain has more work than ours, we should sync to it, |
5087 | | // unless it's invalid, in which case we should find that out and |
5088 | | // disconnect from them elsewhere). |
5089 | 20.3M | if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) { Branch (5089:13): [True: 10.3M, False: 10.0M]
Branch (5089:54): [True: 9.07M, False: 1.24M]
|
5090 | | // The outbound peer has sent us a block with at least as much work as our current tip, so reset the timeout if it was set |
5091 | 9.07M | if (state.m_chain_sync.m_timeout != 0s) { Branch (5091:17): [True: 44.4k, False: 9.02M]
|
5092 | 44.4k | state.m_chain_sync.m_timeout = 0s; |
5093 | 44.4k | state.m_chain_sync.m_work_header = nullptr; |
5094 | 44.4k | state.m_chain_sync.m_sent_getheaders = false; |
5095 | 44.4k | } |
5096 | 11.2M | } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) { Branch (5096:20): [True: 50.9k, False: 11.2M]
Branch (5096:20): [True: 51.0k, False: 11.2M]
Branch (5096:59): [True: 11.2M, False: 0]
Branch (5096:106): [True: 1.24M, False: 9.95M]
Branch (5096:147): [True: 87, False: 1.24M]
|
5097 | | // At this point we know that the outbound peer has either never sent us a block/header, or it has but its tip is behind ours
5098 | | // AND |
5099 | | // we are noticing this for the first time (m_timeout is 0) |
5100 | | // OR we noticed this at some point within the last CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds and set a timeout |
5101 | | // for them, they caught up to our tip at the time of setting the timer but not to our current one (we've also advanced). |
5102 | | // Either way, set a new timeout based on our current tip. |
5103 | 51.0k | state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT; |
5104 | 51.0k | state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip(); |
5105 | 51.0k | state.m_chain_sync.m_sent_getheaders = false; |
5106 | 11.2M | } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) { Branch (5106:20): [True: 11.2M, False: 0]
Branch (5106:20): [True: 2.53k, False: 11.1M]
Branch (5106:57): [True: 2.53k, False: 11.1M]
|
5107 | | // No evidence yet that our peer has synced to a chain with work equal to that |
5108 | | // of our tip, when we first detected it was behind. Send a single getheaders |
5109 | | // message to give the peer a chance to update us. |
5110 | 2.53k | if (state.m_chain_sync.m_sent_getheaders) { Branch (5110:17): [True: 539, False: 1.99k]
|
5111 | | // They've run out of time to catch up! |
5112 | 539 | LogInfo("Outbound peer has old chain, best known block = %s, %s\n", state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", pto.DisconnectMsg(fLogIPs)); |
5113 | 539 | pto.fDisconnect = true; |
5114 | 1.99k | } else { |
5115 | 1.99k | assert(state.m_chain_sync.m_work_header); Branch (5115:17): [True: 1.99k, False: 0]
|
5116 | | // Here, we assume that the getheaders message goes out, |
5117 | | // because it'll either go out or be skipped because of a |
5118 | | // getheaders in-flight already, in which case the peer should |
5119 | | // still respond to us with a sufficiently high work chain tip. |
5120 | 1.99k | MaybeSendGetHeaders(pto, |
5121 | 1.99k | GetLocator(state.m_chain_sync.m_work_header->pprev), |
5122 | 1.99k | peer); |
5123 | 1.99k | LogDebug(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString()); |
5124 | 1.99k | state.m_chain_sync.m_sent_getheaders = true; |
5125 | | // Bump the timeout to allow a response, which could clear the timeout |
5126 | | // (if the response shows the peer has synced), reset the timeout (if |
5127 | | // the peer syncs to the required work but not to our tip), or result |
5128 | | // in disconnect (if we advance to the timeout and pindexBestKnownBlock |
5129 | | // has not sufficiently progressed) |
5130 | 1.99k | state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME; |
5131 | 1.99k | } |
5132 | 2.53k | } |
5133 | 20.3M | } |
5134 | 41.8M | } |
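The m_chain_sync fields above form a small state machine: the timeout is cleared whenever the peer's best known work reaches our tip, (re)armed for CHAIN_SYNC_TIMEOUT when it does not, and on expiry a single getheaders buys the peer HEADERS_RESPONSE_TIME more before disconnection. A simplified sketch of one evaluation step under those assumptions (constants are illustrative; peer_caught_up stands for "peer's best known chain work >= our tip's work"):

    #include <chrono>

    using std::chrono::seconds;

    // Illustrative constants; the real values are defined elsewhere in this file.
    constexpr seconds CHAIN_SYNC_TIMEOUT_SKETCH{20 * 60};
    constexpr seconds HEADERS_RESPONSE_TIME_SKETCH{2 * 60};

    struct ChainSyncSketch {
        seconds timeout{0};
        bool sent_getheaders{false};
    };

    enum class Action { None, SendGetHeaders, Disconnect };

    Action Step(ChainSyncSketch& s, seconds now, bool peer_caught_up)
    {
        if (peer_caught_up) { s = ChainSyncSketch{}; return Action::None; } // clear timeout
        if (s.timeout == seconds{0}) {
            s.timeout = now + CHAIN_SYNC_TIMEOUT_SKETCH;                    // arm timeout
            return Action::None;
        }
        if (now <= s.timeout) return Action::None;                          // still waiting
        if (s.sent_getheaders) return Action::Disconnect;                   // ran out of time
        s.sent_getheaders = true;
        s.timeout = now + HEADERS_RESPONSE_TIME_SKETCH;                     // one last chance
        return Action::SendGetHeaders;
    }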
5135 | | |
5136 | | void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) |
5137 | 0 | { |
5138 | | // If we have any extra block-relay-only peers, disconnect the youngest unless |
5139 | | // it's given us a block -- in which case, compare with the second-youngest, and |
5140 | | // out of those two, disconnect the peer who least recently gave us a block. |
5141 | | // The youngest block-relay-only peer would be the extra peer we connected |
5142 | | // to temporarily in order to sync our tip; see net.cpp. |
5143 | | // Note that we use higher nodeid as a measure for most recent connection. |
5144 | 0 | if (m_connman.GetExtraBlockRelayCount() > 0) { Branch (5144:9): [True: 0, False: 0]
|
5145 | 0 | std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0}; |
5146 | |
5147 | 0 | m_connman.ForEachNode([&](CNode* pnode) { |
5148 | 0 | if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return; Branch (5148:17): [True: 0, False: 0]
Branch (5148:46): [True: 0, False: 0]
|
5149 | 0 | if (pnode->GetId() > youngest_peer.first) { Branch (5149:17): [True: 0, False: 0]
|
5150 | 0 | next_youngest_peer = youngest_peer; |
5151 | 0 | youngest_peer.first = pnode->GetId(); |
5152 | 0 | youngest_peer.second = pnode->m_last_block_time; |
5153 | 0 | } |
5154 | 0 | }); |
5155 | 0 | NodeId to_disconnect = youngest_peer.first; |
5156 | 0 | if (youngest_peer.second > next_youngest_peer.second) { Branch (5156:13): [True: 0, False: 0]
|
5157 | | // Our newest block-relay-only peer gave us a block more recently; |
5158 | | // disconnect our second youngest. |
5159 | 0 | to_disconnect = next_youngest_peer.first; |
5160 | 0 | } |
5161 | 0 | m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
5162 | 0 | AssertLockHeld(::cs_main); |
5163 | | // Make sure we're not getting a block right now, and that |
5164 | | // we've been connected long enough for this eviction to happen |
5165 | | // at all. |
5166 | | // Note that we only request blocks from a peer if we learn of a |
5167 | | // valid headers chain with at least as much work as our tip. |
5168 | 0 | CNodeState *node_state = State(pnode->GetId()); |
5169 | 0 | if (node_state == nullptr || Branch (5169:17): [True: 0, False: 0]
Branch (5169:17): [True: 0, False: 0]
|
5170 | 0 | (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) { Branch (5170:18): [True: 0, False: 0]
Branch (5170:70): [True: 0, False: 0]
|
5171 | 0 | pnode->fDisconnect = true; |
5172 | 0 | LogDebug(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n", |
5173 | 0 | pnode->GetId(), count_seconds(pnode->m_last_block_time)); |
5174 | 0 | return true; |
5175 | 0 | } else { |
5176 | 0 | LogDebug(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", |
5177 | 0 | pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size()); |
5178 | 0 | } |
5179 | 0 | return false; |
5180 | 0 | }); |
5181 | 0 | } |
5182 | | |
5183 | | // Check whether we have too many outbound-full-relay peers |
5184 | 0 | if (m_connman.GetExtraFullOutboundCount() > 0) { Branch (5184:9): [True: 0, False: 0]
|
5185 | | // If we have more outbound-full-relay peers than we target, disconnect one. |
5186 | | // Pick the outbound-full-relay peer that least recently announced |
5187 | | // us a new block, with ties broken by choosing the more recent |
5188 | | // connection (higher node id) |
5189 | | // Protect peers from eviction if we don't have another connection |
5190 | | // to their network, counting both outbound-full-relay and manual peers. |
5191 | 0 | NodeId worst_peer = -1; |
5192 | 0 | int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max(); |
5193 | |
5194 | 0 | m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) { |
5195 | 0 | AssertLockHeld(::cs_main); |
5196 | | |
5197 | | // Only consider outbound-full-relay peers that are not already |
5198 | | // marked for disconnection |
5199 | 0 | if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return; Branch (5199:17): [True: 0, False: 0]
Branch (5199:49): [True: 0, False: 0]
|
5200 | 0 | CNodeState *state = State(pnode->GetId()); |
5201 | 0 | if (state == nullptr) return; // shouldn't be possible, but just in case Branch (5201:17): [True: 0, False: 0]
|
5202 | | // Don't evict our protected peers |
5203 | 0 | if (state->m_chain_sync.m_protect) return; Branch (5203:17): [True: 0, False: 0]
|
5204 | | // If this is the only connection on a particular network that is |
5205 | | // OUTBOUND_FULL_RELAY or MANUAL, protect it. |
5206 | 0 | if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return; Branch (5206:17): [True: 0, False: 0]
|
5207 | 0 | if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) { Branch (5207:17): [True: 0, False: 0]
Branch (5207:82): [True: 0, False: 0]
Branch (5207:147): [True: 0, False: 0]
|
5208 | 0 | worst_peer = pnode->GetId(); |
5209 | 0 | oldest_block_announcement = state->m_last_block_announcement; |
5210 | 0 | } |
5211 | 0 | }); |
5212 | 0 | if (worst_peer != -1) { Branch (5212:13): [True: 0, False: 0]
|
5213 | 0 | bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
5214 | 0 | AssertLockHeld(::cs_main); |
5215 | | |
5216 | | // Only disconnect a peer that has been connected to us for |
5217 | | // some reasonable fraction of our check-frequency, to give |
5218 | | // it time for new information to have arrived. |
5219 | | // Also don't disconnect any peer we're trying to download a |
5220 | | // block from. |
5221 | 0 | CNodeState &state = *State(pnode->GetId()); |
5222 | 0 | if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) { Branch (5222:21): [True: 0, False: 0]
Branch (5222:21): [True: 0, False: 0]
Branch (5222:72): [True: 0, False: 0]
|
5223 | 0 | LogDebug(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement); |
5224 | 0 | pnode->fDisconnect = true; |
5225 | 0 | return true; |
5226 | 0 | } else { |
5227 | 0 | LogDebug(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", |
5228 | 0 | pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size()); |
5229 | 0 | return false; |
5230 | 0 | } |
5231 | 0 | }); |
5232 | 0 | if (disconnected) { Branch (5232:17): [True: 0, False: 0]
|
5233 | | // If we disconnected an extra peer, that means we successfully |
5234 | | // connected to at least one peer after the last time we |
5235 | | // detected a stale tip. Don't try any more extra peers until |
5236 | | // we next detect a stale tip, to limit the load we put on the |
5237 | | // network from these extra connections. |
5238 | 0 | m_connman.SetTryNewOutboundPeer(false); |
5239 | 0 | } |
5240 | 0 | } |
5241 | 0 | } |
5242 | 0 | } |
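The scan above uses a higher node id as a proxy for a more recent connection: it tracks the two highest ids seen and, if the youngest block-relay-only peer delivered a block more recently than the runner-up, evicts the runner-up instead. A sketch of that selection with a hypothetical PeerInfo:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Hypothetical view of a block-relay-only peer for the selection above.
    struct PeerInfo { int64_t id; int64_t last_block_time; };

    // Higher node id is used as a proxy for a more recent connection.
    int64_t PickEvictionCandidate(const std::vector<PeerInfo>& peers)
    {
        std::pair<int64_t, int64_t> youngest{-1, 0}, next_youngest{-1, 0};
        for (const auto& p : peers) {
            if (p.id > youngest.first) {
                next_youngest = youngest;
                youngest = {p.id, p.last_block_time};
            }
        }
        // Keep the youngest if it gave us a block more recently than the runner-up.
        return youngest.second > next_youngest.second ? next_youngest.first : youngest.first;
    }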
5243 | | |
5244 | | void PeerManagerImpl::CheckForStaleTipAndEvictPeers() |
5245 | 0 | { |
5246 | 0 | LOCK(cs_main); |
5247 | |
5248 | 0 | auto now{GetTime<std::chrono::seconds>()}; |
5249 | |
5250 | 0 | EvictExtraOutboundPeers(now); |
5251 | |
5252 | 0 | if (now > m_stale_tip_check_time) { Branch (5252:9): [True: 0, False: 0]
|
5253 | | // Check whether our tip is stale, and if so, allow using an extra |
5254 | | // outbound peer |
5255 | 0 | if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) { Branch (5255:13): [True: 0, False: 0]
Branch (5255:55): [True: 0, False: 0]
Branch (5255:87): [True: 0, False: 0]
Branch (5255:124): [True: 0, False: 0]
|
5256 | 0 | LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", |
5257 | 0 | count_seconds(now - m_last_tip_update.load())); |
5258 | 0 | m_connman.SetTryNewOutboundPeer(true); |
5259 | 0 | } else if (m_connman.GetTryNewOutboundPeer()) { Branch (5259:20): [True: 0, False: 0]
|
5260 | 0 | m_connman.SetTryNewOutboundPeer(false); |
5261 | 0 | } |
5262 | 0 | m_stale_tip_check_time = now + STALE_CHECK_INTERVAL; |
5263 | 0 | } |
5264 | |
5265 | 0 | if (!m_initial_sync_finished && CanDirectFetch()) { Branch (5265:9): [True: 0, False: 0]
Branch (5265:37): [True: 0, False: 0]
|
5266 | 0 | m_connman.StartExtraBlockRelayPeers(); |
5267 | 0 | m_initial_sync_finished = true; |
5268 | 0 | } |
5269 | 0 | } |
5270 | | |
5271 | | void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now) |
5272 | 41.8M | { |
5273 | 41.8M | if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) && Branch (5273:9): [True: 0, False: 41.8M]
Branch (5273:9): [True: 0, False: 41.8M]
|
5274 | 41.8M | peer.m_ping_nonce_sent && Branch (5274:9): [True: 0, False: 0]
|
5275 | 41.8M | now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) Branch (5275:9): [True: 0, False: 0]
|
5276 | 0 | { |
5277 | | // The ping timeout uses mocktime. To disable the check during
5278 | | // testing, increase -peertimeout. |
5279 | 0 | LogDebug(BCLog::NET, "ping timeout: %fs, %s", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), node_to.DisconnectMsg(fLogIPs)); |
5280 | 0 | node_to.fDisconnect = true; |
5281 | 0 | return; |
5282 | 0 | } |
5283 | | |
5284 | 41.8M | bool pingSend = false; |
5285 | | |
5286 | 41.8M | if (peer.m_ping_queued) { Branch (5286:9): [True: 0, False: 41.8M]
|
5287 | | // RPC ping request by user |
5288 | 0 | pingSend = true; |
5289 | 0 | } |
5290 | | |
5291 | 41.8M | if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) { Branch (5291:9): [True: 89.5k, False: 41.7M]
Branch (5291:9): [True: 88.7k, False: 41.7M]
Branch (5291:40): [True: 88.7k, False: 793]
|
5292 | | // Ping automatically sent as a latency probe & keepalive. |
5293 | 88.7k | pingSend = true; |
5294 | 88.7k | } |
5295 | | |
5296 | 41.8M | if (pingSend) { Branch (5296:9): [True: 88.7k, False: 41.7M]
|
5297 | 88.7k | uint64_t nonce; |
5298 | 88.7k | do { |
5299 | 88.7k | nonce = FastRandomContext().rand64(); |
5300 | 88.7k | } while (nonce == 0); Branch (5300:18): [True: 0, False: 88.7k]
|
5301 | 88.7k | peer.m_ping_queued = false; |
5302 | 88.7k | peer.m_ping_start = now; |
5303 | 88.7k | if (node_to.GetCommonVersion() > BIP0031_VERSION) { Branch (5303:13): [True: 88.7k, False: 0]
|
5304 | 88.7k | peer.m_ping_nonce_sent = nonce; |
5305 | 88.7k | MakeAndPushMessage(node_to, NetMsgType::PING, nonce); |
5306 | 88.7k | } else { |
5307 | | // Peer is too old to support ping command with nonce, pong will never arrive. |
5308 | 0 | peer.m_ping_nonce_sent = 0; |
5309 | 0 | MakeAndPushMessage(node_to, NetMsgType::PING); |
5310 | 0 | } |
5311 | 88.7k | } |
5312 | 41.8M | } |
5313 | | |
5314 | | void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) |
5315 | 41.8M | { |
5316 | | // Nothing to do for non-address-relay peers |
5317 | 41.8M | if (!peer.m_addr_relay_enabled) return; Branch (5317:9): [True: 20.6M, False: 21.1M]
|
5318 | | |
5319 | 21.1M | LOCK(peer.m_addr_send_times_mutex); |
5320 | | // Periodically advertise our local address to the peer. |
5321 | 21.1M | if (fListen && !m_chainman.IsInitialBlockDownload() && Branch (5321:9): [True: 21.1M, False: 0]
Branch (5321:20): [True: 21.1M, False: 0]
|
5322 | 21.1M | peer.m_next_local_addr_send < current_time) { Branch (5322:9): [True: 63.9k, False: 21.0M]
|
5323 | | // If we've sent before, clear the bloom filter for the peer, so that our |
5324 | | // self-announcement will actually go out. |
5325 | | // This might be unnecessary if the bloom filter has already rolled |
5326 | | // over since our last self-announcement, but the bandwidth cost
5327 | | // we can incur by doing this is small (it happens once a day on
5328 | | // average).
5329 | 63.9k | if (peer.m_next_local_addr_send != 0us) { Branch (5329:13): [True: 18.3k, False: 45.5k]
|
5330 | 18.3k | peer.m_addr_known->reset(); |
5331 | 18.3k | } |
5332 | 63.9k | if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) { Branch (5332:37): [True: 0, False: 63.9k]
|
5333 | 0 | CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()}; |
5334 | 0 | PushAddress(peer, local_addr); |
5335 | 0 | } |
5336 | 63.9k | peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); |
5337 | 63.9k | } |
5338 | | |
5339 | | // We sent an `addr` message to this peer recently. Nothing more to do. |
5340 | 21.1M | if (current_time <= peer.m_next_addr_send) return; Branch (5340:9): [True: 20.7M, False: 398k]
|
5341 | | |
5342 | 398k | peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL); |
5343 | | |
5344 | 398k | if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) { Branch (5344:9): [True: 0, False: 398k]
|
5345 | | // Should be impossible since we always check size before adding to |
5346 | | // m_addrs_to_send. Recover by trimming the vector. |
5347 | 0 | peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND); |
5348 | 0 | } |
5349 | | |
5350 | | // Remove addr records that the peer already knows about, and add new |
5351 | | // addrs to the m_addr_known filter on the same pass. |
5352 | 398k | auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) { |
5353 | 276 | bool ret = peer.m_addr_known->contains(addr.GetKey()); |
5354 | 276 | if (!ret) peer.m_addr_known->insert(addr.GetKey()); Branch (5354:13): [True: 249, False: 27]
|
5355 | 276 | return ret; |
5356 | 276 | }; |
5357 | 398k | peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known), |
5358 | 398k | peer.m_addrs_to_send.end()); |
5359 | | |
5360 | | // No addr messages to send |
5361 | 398k | if (peer.m_addrs_to_send.empty()) return; Branch (5361:9): [True: 398k, False: 145]
|
5362 | | |
5363 | 145 | if (peer.m_wants_addrv2) { Branch (5363:9): [True: 0, False: 145]
|
5364 | 0 | MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send)); |
5365 | 145 | } else { |
5366 | 145 | MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send)); |
5367 | 145 | } |
5368 | 145 | peer.m_addrs_to_send.clear(); |
5369 | | |
5370 | | // we only send the big addr message once |
5371 | 145 | if (peer.m_addrs_to_send.capacity() > 40) { Branch (5371:9): [True: 0, False: 145]
|
5372 | 0 | peer.m_addrs_to_send.shrink_to_fit(); |
5373 | 0 | } |
5374 | 145 | } |
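The addr_already_known lambda above tests and inserts in a single pass, so each address is relayed to a peer at most once per filter lifetime, and the erase/remove_if drops the already-known entries before the addr message is built. A sketch using std::set in place of CRollingBloomFilter (which additionally forgets old entries over time):

    #include <algorithm>
    #include <set>
    #include <string>
    #include <vector>

    // std::set stands in for CRollingBloomFilter here.
    void TrimKnownAddrs(std::vector<std::string>& to_send, std::set<std::string>& known)
    {
        // Test and insert in one pass, mirroring addr_already_known above.
        auto already_known = [&known](const std::string& addr) {
            return !known.insert(addr).second; // insert() reports whether it was new
        };
        to_send.erase(std::remove_if(to_send.begin(), to_send.end(), already_known),
                      to_send.end());
    }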
5375 | | |
5376 | | void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer) |
5377 | 41.8M | { |
5378 | | // Delay sending SENDHEADERS (BIP 130) until we're done with an |
5379 | | // initial-headers-sync with this peer. Receiving headers announcements for |
5380 | | // new blocks while trying to sync their headers chain is problematic, |
5381 | | // because of the state tracking done during that sync.
5382 | 41.8M | if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) { Branch (5382:9): [True: 20.4M, False: 21.4M]
Branch (5382:37): [True: 20.4M, False: 0]
|
5383 | 20.4M | LOCK(cs_main); |
5384 | 20.4M | CNodeState &state = *State(node.GetId()); |
5385 | 20.4M | if (state.pindexBestKnownBlock != nullptr && Branch (5385:13): [True: 88.7k, False: 20.3M]
|
5386 | 20.4M | state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) { Branch (5386:17): [True: 88.7k, False: 0]
|
5387 | | // Tell our peer we prefer to receive headers rather than inv's |
5388 | | // We send this to non-NODE NETWORK peers as well, because even |
5389 | | // non-NODE NETWORK peers can announce blocks (such as pruning |
5390 | | // nodes) |
5391 | 88.7k | MakeAndPushMessage(node, NetMsgType::SENDHEADERS); |
5392 | 88.7k | peer.m_sent_sendheaders = true; |
5393 | 88.7k | } |
5394 | 20.4M | } |
5395 | 41.8M | } |
5396 | | |
5397 | | void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time) |
5398 | 41.8M | { |
5399 | 41.8M | if (m_opts.ignore_incoming_txs) return; Branch (5399:9): [True: 0, False: 41.8M]
|
5400 | 41.8M | if (pto.GetCommonVersion() < FEEFILTER_VERSION) return; Branch (5400:9): [True: 0, False: 41.8M]
|
5401 | | // peers with the forcerelay permission should not filter txs to us |
5402 | 41.8M | if (pto.HasPermission(NetPermissionFlags::ForceRelay)) return; Branch (5402:9): [True: 0, False: 41.8M]
|
5403 | | // Don't send feefilter messages to outbound block-relay-only peers since they should never announce |
5404 | | // transactions to us, regardless of feefilter state. |
5405 | 41.8M | if (pto.IsBlockOnlyConn()) return; Branch (5405:9): [True: 0, False: 41.8M]
|
5406 | | |
5407 | 41.8M | CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK(); |
5408 | | |
5409 | 41.8M | if (m_chainman.IsInitialBlockDownload()) { Branch (5409:9): [True: 0, False: 41.8M]
|
5410 | | // Received tx-inv messages are discarded when the active |
5411 | | // chainstate is in IBD, so tell the peer to not send them. |
5412 | 0 | currentFilter = MAX_MONEY; |
5413 | 41.8M | } else { |
5414 | 41.8M | static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)}; |
5415 | 41.8M | if (peer.m_fee_filter_sent == MAX_FILTER) { Branch (5415:13): [True: 0, False: 41.8M]
|
5416 | | // Send the current filter if we sent MAX_FILTER previously |
5417 | | // and made it out of IBD. |
5418 | 0 | peer.m_next_send_feefilter = 0us; |
5419 | 0 | } |
5420 | 41.8M | } |
5421 | 41.8M | if (current_time > peer.m_next_send_feefilter) { Branch (5421:9): [True: 242k, False: 41.6M]
|
5422 | 242k | CAmount filterToSend = m_fee_filter_rounder.round(currentFilter); |
5423 | | // We always have a fee filter of at least the min relay fee |
5424 | 242k | filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK()); |
5425 | 242k | if (filterToSend != peer.m_fee_filter_sent) { Branch (5425:13): [True: 88.7k, False: 153k]
|
5426 | 88.7k | MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend); |
5427 | 88.7k | peer.m_fee_filter_sent = filterToSend; |
5428 | 88.7k | } |
5429 | 242k | peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL); |
5430 | 242k | } |
5431 | | // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY |
5432 | | // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. |
5433 | 41.6M | else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter && Branch (5433:14): [True: 187k, False: 41.4M]
Branch (5433:14): [True: 187k, False: 41.4M]
|
5434 | 41.6M | (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) { Branch (5434:18): [True: 187k, False: 0]
Branch (5434:68): [True: 0, False: 0]
|
5435 | 187k | peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY); |
5436 | 187k | } |
5437 | 41.8M | } |
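The early-rebroadcast test above fires only when the current filter leaves the band (3/4 * sent, 4/3 * sent). For example, with m_fee_filter_sent at 1200 sat/kvB the band is roughly 900 to 1600: a new filter of 800 pulls the broadcast forward, while 1000 does not. The check, extracted into a standalone sketch:

    #include <cstdint>
    #include <iostream>

    // The "changed substantially" test above: outside (3/4, 4/3) of what we sent.
    bool SubstantialChange(int64_t current, int64_t sent)
    {
        return current < 3 * sent / 4 || current > 4 * sent / 3;
    }

    int main()
    {
        std::cout << SubstantialChange(800, 1200) << '\n';  // 1: below 900
        std::cout << SubstantialChange(1000, 1200) << '\n'; // 0: within the band
        std::cout << SubstantialChange(1700, 1200) << '\n'; // 1: above 1600
    }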
5438 | | |
5439 | | namespace { |
5440 | | class CompareInvMempoolOrder |
5441 | | { |
5442 | | CTxMemPool* mp; |
5443 | | bool m_wtxid_relay; |
5444 | | public: |
5445 | | explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid) |
5446 | 4.30M | { |
5447 | 4.30M | mp = _mempool; |
5448 | 4.30M | m_wtxid_relay = use_wtxid; |
5449 | 4.30M | } |
5450 | | |
5451 | | bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b) |
5452 | 411k | { |
5453 | | /* As std::make_heap produces a max-heap, we want the entries with the |
5454 | | * fewest ancestors/highest fee to sort later. */ |
5455 | 411k | return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay); |
5456 | 411k | } |
5457 | | }; |
5458 | | } // namespace |
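Because std::make_heap builds a max-heap, the comparator above reverses its arguments so that the most attractive entries (fewest ancestors, highest fee) compare greatest and are popped first when building the inv list. A toy demonstration with plain integers standing in for mempool scores:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Higher score stands in for "fewest ancestors / highest fee".
        std::vector<int> scores{3, 9, 1, 7};
        auto cmp = [](int a, int b) { return a < b; }; // greatest score on top
        std::make_heap(scores.begin(), scores.end(), cmp);
        std::pop_heap(scores.begin(), scores.end(), cmp);
        std::cout << scores.back() << '\n'; // 9: best candidate announced first
    }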
5459 | | |
5460 | | bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const |
5461 | 943k | { |
5462 | | // block-relay-only peers may never send txs to us |
5463 | 943k | if (peer.IsBlockOnlyConn()) return true; Branch (5463:9): [True: 0, False: 943k]
|
5464 | 943k | if (peer.IsFeelerConn()) return true; Branch (5464:9): [True: 0, False: 943k]
|
5465 | | // In -blocksonly mode, peers need the 'relay' permission to send txs to us |
5466 | 943k | if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true; Branch (5466:9): [True: 0, False: 943k]
Branch (5466:39): [True: 0, False: 0]
|
5467 | 943k | return false; |
5468 | 943k | } |
5469 | | |
5470 | | bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) |
5471 | 48.0k | { |
5472 | | // We don't participate in addr relay with outbound block-relay-only |
5473 | | // connections to prevent providing adversaries with the additional |
5474 | | // information of addr traffic to infer the link. |
5475 | 48.0k | if (node.IsBlockOnlyConn()) return false; Branch (5475:9): [True: 0, False: 48.0k]
|
5476 | | |
5477 | 48.0k | if (!peer.m_addr_relay_enabled.exchange(true)) { Branch (5477:9): [True: 45.5k, False: 2.42k]
|
5478 | | // During version message processing (non-block-relay-only outbound peers) |
5479 | | // or on first addr-related message we have received (inbound peers), initialize |
5480 | | // m_addr_known. |
5481 | 45.5k | peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001); |
5482 | 45.5k | } |
5483 | | |
5484 | 48.0k | return true; |
5485 | 48.0k | } |
5486 | | |
5487 | | bool PeerManagerImpl::SendMessages(CNode* pto) |
5488 | 42.4M | { |
5489 | 42.4M | AssertLockNotHeld(m_tx_download_mutex); |
5490 | 42.4M | AssertLockHeld(g_msgproc_mutex); |
5491 | | |
5492 | 42.4M | PeerRef peer = GetPeerRef(pto->GetId()); |
5493 | 42.4M | if (!peer) return false; Branch (5493:9): [True: 0, False: 42.4M]
|
5494 | 42.4M | const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); |
5495 | | |
5496 | | // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll |
5497 | | // disconnect misbehaving peers even before the version handshake is complete. |
5498 | 42.4M | if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true; Branch (5498:9): [True: 5.65k, False: 42.4M]
|
5499 | | |
5500 | | // Initiate version handshake for outbound connections |
5501 | 42.4M | if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) { Branch (5501:9): [True: 21.2M, False: 21.2M]
Branch (5501:34): [True: 44.3k, False: 21.1M]
|
5502 | 44.3k | PushNodeVersion(*pto, *peer); |
5503 | 44.3k | peer->m_outbound_version_message_sent = true; |
5504 | 44.3k | } |
5505 | | |
5506 | | // Don't send anything until the version handshake is complete |
5507 | 42.4M | if (!pto->fSuccessfullyConnected || pto->fDisconnect) Branch (5507:9): [True: 600k, False: 41.8M]
Branch (5507:41): [True: 815, False: 41.8M]
|
5508 | 601k | return true; |
5509 | | |
5510 | 41.8M | const auto current_time{GetTime<std::chrono::microseconds>()}; |
5511 | | |
5512 | 41.8M | if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) { Branch (5512:9): [True: 0, False: 41.8M]
Branch (5512:9): [True: 0, False: 41.8M]
Branch (5512:35): [True: 0, False: 0]
|
5513 | 0 | LogDebug(BCLog::NET, "addrfetch connection timeout, %s\n", pto->DisconnectMsg(fLogIPs)); |
5514 | 0 | pto->fDisconnect = true; |
5515 | 0 | return true; |
5516 | 0 | } |
5517 | | |
5518 | 41.8M | MaybeSendPing(*pto, *peer, current_time); |
5519 | | |
5520 | | // MaybeSendPing may have marked peer for disconnection |
5521 | 41.8M | if (pto->fDisconnect) return true; Branch (5521:9): [True: 0, False: 41.8M]
|
5522 | | |
5523 | 41.8M | MaybeSendAddr(*pto, *peer, current_time); |
5524 | | |
5525 | 41.8M | MaybeSendSendHeaders(*pto, *peer); |
5526 | | |
5527 | 41.8M | { |
5528 | 41.8M | LOCK(cs_main); |
5529 | | |
5530 | 41.8M | CNodeState &state = *State(pto->GetId()); |
5531 | | |
5532 | | // Start block sync |
5533 | 41.8M | if (m_chainman.m_best_header == nullptr) { Branch (5533:13): [True: 0, False: 41.8M]
|
5534 | 0 | m_chainman.m_best_header = m_chainman.ActiveChain().Tip(); |
5535 | 0 | } |
5536 | | |
5537 | | // Determine whether we might try initial headers sync or parallel |
5538 | | // block download from this peer -- this mostly affects behavior while |
5539 | | // in IBD (once out of IBD, we sync from all peers). |
5540 | 41.8M | bool sync_blocks_and_headers_from_peer = false; |
5541 | 41.8M | if (state.fPreferredDownload) { Branch (5541:13): [True: 20.9M, False: 20.8M]
|
5542 | 20.9M | sync_blocks_and_headers_from_peer = true; |
5543 | 20.9M | } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) { Branch (5543:20): [True: 20.8M, False: 0]
Branch (5543:45): [True: 20.8M, False: 0]
|
5544 | | // Typically this is an inbound peer. If we don't have any outbound |
5545 | | // peers, or if we aren't downloading any blocks from such peers, |
5546 | | // then allow block downloads from this peer, too. |
5547 | | // We prefer downloading blocks from outbound peers to avoid |
5548 | | // putting undue load on (say) some home user who is just making |
5549 | | // outbound connections to the network, but if our only source of |
5550 | | // the latest blocks is from an inbound peer, we have to be sure to |
5551 | | // eventually download it (and not just wait indefinitely for an |
5552 | | // outbound peer to have it). |
5553 | 20.8M | if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) { Branch (5553:17): [True: 32.6k, False: 20.8M]
Branch (5553:56): [True: 19.6M, False: 1.14M]
|
5554 | 19.7M | sync_blocks_and_headers_from_peer = true; |
5555 | 19.7M | } |
5556 | 20.8M | } |
5557 | | |
5558 | 41.8M | if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) { Branch (5558:13): [True: 88.7k, False: 41.7M]
Branch (5558:36): [True: 88.7k, False: 0]
Branch (5558:61): [True: 88.7k, False: 0]
|
5559 | | // Only actively request headers from a single peer, unless we're close to today. |
5560 | 88.7k | if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) { Branch (5560:17): [True: 88.7k, False: 0]
Branch (5560:18): [True: 11.0k, False: 77.6k]
Branch (5560:39): [True: 11.0k, False: 0]
Branch (5560:77): [True: 77.6k, False: 0]
|
5561 | 88.7k | const CBlockIndex* pindexStart = m_chainman.m_best_header; |
5562 | | /* If possible, start at the block preceding the currently |
5563 | | best known header. This ensures that we always get a |
5564 | | non-empty list of headers back as long as the peer |
5565 | | is up-to-date. With a non-empty response, we can initialise |
5566 | | the peer's known best block. This wouldn't be possible |
5567 | | if we requested starting at m_chainman.m_best_header and |
5568 | | got back an empty response. */ |
5569 | 88.7k | if (pindexStart->pprev) Branch (5569:21): [True: 18.3k, False: 70.3k]
|
5570 | 18.3k | pindexStart = pindexStart->pprev; |
5571 | 88.7k | if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) { Branch (5571:21): [True: 88.7k, False: 0]
|
5572 | 88.7k | LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height); |
5573 | | |
5574 | 88.7k | state.fSyncStarted = true; |
5575 | 88.7k | peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE + |
5576 | 88.7k | ( |
5577 | | // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling |
5578 | | // to maintain precision |
5579 | 88.7k | std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} * |
5580 | 88.7k | Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing |
5581 | 88.7k | ); |
5582 | 88.7k | nSyncStarted++; |
5583 | 88.7k | } |
5584 | 88.7k | } |
5585 | 88.7k | } |
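The timeout computed above is the base plus one HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER (1 ms) for each header we expect to download, estimated as the time our best header lags divided by the proof-of-work target spacing. As a worked sketch, a best header about 15 years behind with 600 s spacing implies roughly 788,400 expected headers, adding about 788 s to the 15 minute base:

    #include <chrono>
    #include <iostream>

    using namespace std::chrono;

    // The timeout formula above: base + per_header * (lag / target spacing).
    microseconds HeadersTimeout(seconds behind, seconds pow_target_spacing)
    {
        constexpr auto base = duration_cast<microseconds>(minutes{15});
        constexpr auto per_header = microseconds{1000}; // 1 ms per expected header
        return base + per_header * (behind.count() / pow_target_spacing.count());
    }

    int main()
    {
        // ~15 years of lag at 600 s per block => ~788'400 expected headers.
        auto t = HeadersTimeout(seconds{473'040'000}, seconds{600});
        std::cout << duration_cast<seconds>(t).count() << "s\n"; // 1688s
    }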
5586 | | |
5587 | | // |
5588 | | // Try sending block announcements via headers |
5589 | | // |
5590 | 41.8M | { |
5591 | | // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our |
5592 | | // list of block hashes we're relaying, and our peer wants |
5593 | | // headers announcements, then find the first header |
5594 | | // that is not yet known to our peer but would connect, and send.
5595 | | // If no header would connect, or if we have too many |
5596 | | // blocks, or if the peer doesn't want headers, just |
5597 | | // add all to the inv queue. |
5598 | 41.8M | LOCK(peer->m_block_inv_mutex); |
5599 | 41.8M | std::vector<CBlock> vHeaders; |
5600 | 41.8M | bool fRevertToInv = ((!peer->m_prefers_headers && Branch (5600:35): [True: 41.7M, False: 48.6k]
|
5601 | 41.8M | (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) || Branch (5601:35): [True: 20.9M, False: 20.8M]
Branch (5601:72): [True: 1.01M, False: 19.8M]
|
5602 | 41.8M | peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE); Branch (5602:34): [True: 0, False: 19.8M]
|
5603 | 41.8M | const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery |
5604 | 41.8M | ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date |
5605 | | |
5606 | 41.8M | if (!fRevertToInv) { Branch (5606:17): [True: 19.8M, False: 21.9M]
|
5607 | 19.8M | bool fFoundStartingHeader = false; |
5608 | | // Try to find first header that our peer doesn't have, and |
5609 | | // then send all headers past that one. If we come across any |
5610 | | // headers that aren't on m_chainman.ActiveChain(), give up. |
5611 | 19.8M | for (const uint256& hash : peer->m_blocks_for_headers_relay) { Branch (5611:42): [True: 3.86M, False: 16.0M]
|
5612 | 3.86M | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash); |
5613 | 3.86M | assert(pindex); Branch (5613:21): [True: 3.86M, False: 0]
|
5614 | 3.86M | if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) { Branch (5614:25): [True: 46, False: 3.86M]
|
5615 | | // Bail out if we reorged away from this block |
5616 | 46 | fRevertToInv = true; |
5617 | 46 | break; |
5618 | 46 | } |
5619 | 3.86M | if (pBestIndex != nullptr && pindex->pprev != pBestIndex) { Branch (5619:25): [True: 93, False: 3.86M]
Branch (5619:50): [True: 0, False: 93]
|
5620 | | // This means that the blocks to announce don't
5621 | | // connect to each other. |
5622 | | // This shouldn't really be possible to hit during |
5623 | | // regular operation (because reorgs should take us to |
5624 | | // a chain that has some block not on the prior chain, |
5625 | | // which should be caught by the prior check), but one |
5626 | | // way this could happen is by using invalidateblock / |
5627 | | // reconsiderblock repeatedly on the tip, causing it to |
5628 | | // be added multiple times to m_blocks_for_headers_relay. |
5629 | | // Robustly deal with this rare situation by reverting |
5630 | | // to an inv. |
5631 | 0 | fRevertToInv = true; |
5632 | 0 | break; |
5633 | 0 | } |
5634 | 3.86M | pBestIndex = pindex; |
5635 | 3.86M | if (fFoundStartingHeader) { Branch (5635:25): [True: 79, False: 3.86M]
|
5636 | | // add this to the headers message |
5637 | 79 | vHeaders.emplace_back(pindex->GetBlockHeader()); |
5638 | 3.86M | } else if (PeerHasHeader(&state, pindex)) { Branch (5638:32): [True: 6.55k, False: 3.85M]
|
5639 | 6.55k | continue; // keep looking for the first new block |
5640 | 3.85M | } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) { Branch (5640:32): [True: 0, False: 3.85M]
Branch (5640:60): [True: 768, False: 3.85M]
|
5641 | | // Peer doesn't have this header but they do have the prior one. |
5642 | | // Start sending headers. |
5643 | 768 | fFoundStartingHeader = true; |
5644 | 768 | vHeaders.emplace_back(pindex->GetBlockHeader()); |
5645 | 3.85M | } else { |
5646 | | // Peer doesn't have this header or the prior one -- nothing will |
5647 | | // connect, so bail out. |
5648 | 3.85M | fRevertToInv = true; |
5649 | 3.85M | break; |
5650 | 3.85M | } |
5651 | 3.86M | } |
5652 | 19.8M | } |
5653 | 41.8M | if (!fRevertToInv && !vHeaders.empty()) { Branch (5653:17): [True: 16.0M, False: 25.8M]
Branch (5653:34): [True: 767, False: 16.0M]
|
5654 | 767 | if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) { Branch (5654:21): [True: 723, False: 44]
Branch (5654:45): [True: 689, False: 34]
|
5655 | | // We only send up to 1 block as header-and-ids; having more than one
5656 | | // to announce probably means we're doing an initial-ish sync or they're slow.
5657 | 689 | LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, |
5658 | 689 | vHeaders.front().GetHash().ToString(), pto->GetId()); |
5659 | | |
5660 | 689 | std::optional<CSerializedNetMsg> cached_cmpctblock_msg; |
5661 | 689 | { |
5662 | 689 | LOCK(m_most_recent_block_mutex); |
5663 | 689 | if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) { Branch (5663:29): [True: 163, False: 526]
|
5664 | 163 | cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block); |
5665 | 163 | } |
5666 | 689 | } |
5667 | 689 | if (cached_cmpctblock_msg.has_value()) { Branch (5667:25): [True: 163, False: 526]
|
5668 | 163 | PushMessage(*pto, std::move(cached_cmpctblock_msg.value())); |
5669 | 526 | } else { |
5670 | 526 | CBlock block; |
5671 | 526 | const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)}; |
5672 | 526 | assert(ret); Branch (5672:25): [True: 526, False: 0]
|
5673 | 526 | CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()}; |
5674 | 526 | MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock); |
5675 | 526 | } |
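| | // Using the cached compact block when the announced block is the
| | // most recent one avoids re-reading it from disk and rebuilding the
| | // short-ID structure separately for every high-bandwidth peer.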
5676 | 689 | state.pindexBestHeaderSent = pBestIndex; |
5677 | 689 | } else if (peer->m_prefers_headers) { Branch (5677:28): [True: 78, False: 0]
|
5678 | 78 | if (vHeaders.size() > 1) { Branch (5678:25): [True: 44, False: 34]
|
5679 | 44 | LogDebug(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, |
5680 | 44 | vHeaders.size(), |
5681 | 44 | vHeaders.front().GetHash().ToString(), |
5682 | 44 | vHeaders.back().GetHash().ToString(), pto->GetId()); |
5683 | 44 | } else { |
5684 | 34 | LogDebug(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__, |
5685 | 34 | vHeaders.front().GetHash().ToString(), pto->GetId()); |
5686 | 34 | } |
5687 | 78 | MakeAndPushMessage(*pto, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders)); |
5688 | 78 | state.pindexBestHeaderSent = pBestIndex; |
5689 | 78 | } else |
5690 | 0 | fRevertToInv = true; |
5691 | 767 | } |
5692 | 41.8M | if (fRevertToInv) { Branch (5692:17): [True: 25.8M, False: 16.0M]
|
5693 | | // If falling back to using an inv, just try to inv the tip. |
5694 | | // The last entry in m_blocks_for_headers_relay was our tip at some point |
5695 | | // in the past. |
5696 | 25.8M | if (!peer->m_blocks_for_headers_relay.empty()) { Branch (5696:21): [True: 9.76M, False: 16.0M]
|
5697 | 9.76M | const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back(); |
5698 | 9.76M | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce); |
5699 | 9.76M | assert(pindex); Branch (5699:21): [True: 9.76M, False: 0]
|
5700 | | |
5701 | | // Warn if we're announcing a block that is not on the main chain. |
5702 | | // This should be very rare and could be optimized out. |
5703 | | // Just log for now. |
5704 | 9.76M | if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) { Branch (5704:25): [True: 284, False: 9.76M]
|
5705 | 284 | LogDebug(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n", |
5706 | 284 | hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString()); |
5707 | 284 | } |
5708 | | |
5709 | | // If the peer's chain has this block, don't inv it back. |
5710 | 9.76M | if (!PeerHasHeader(&state, pindex)) { Branch (5710:25): [True: 9.76M, False: 1.14k]
|
5711 | 9.76M | peer->m_blocks_for_inv_relay.push_back(hashToAnnounce); |
5712 | 9.76M | LogDebug(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, |
5713 | 9.76M | pto->GetId(), hashToAnnounce.ToString()); |
5714 | 9.76M | } |
5715 | 9.76M | } |
5716 | 25.8M | } |
5717 | 41.8M | peer->m_blocks_for_headers_relay.clear(); |
5718 | 41.8M | } |
5719 | | |
5720 | | // |
5721 | | // Message: inventory |
5722 | | // |
5723 | 0 | std::vector<CInv> vInv; |
5724 | 41.8M | { |
5725 | 41.8M | LOCK(peer->m_block_inv_mutex); |
5726 | 41.8M | vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET)); |
5727 | | |
5728 | | // Add blocks |
5729 | 41.8M | for (const uint256& hash : peer->m_blocks_for_inv_relay) { Branch (5729:38): [True: 9.77M, False: 41.8M]
|
5730 | 9.77M | vInv.emplace_back(MSG_BLOCK, hash); |
5731 | 9.77M | if (vInv.size() == MAX_INV_SZ) { Branch (5731:21): [True: 0, False: 9.77M]
|
5732 | 0 | MakeAndPushMessage(*pto, NetMsgType::INV, vInv); |
5733 | 0 | vInv.clear(); |
5734 | 0 | } |
5735 | 9.77M | } |
5736 | 41.8M | peer->m_blocks_for_inv_relay.clear(); |
5737 | 41.8M | } |
5738 | | |
5739 | 41.8M | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { Branch (5739:49): [True: 41.8M, False: 0]
|
5740 | 41.8M | LOCK(tx_relay->m_tx_inventory_mutex); |
5741 | | // Check whether periodic sends should happen |
5742 | 41.8M | bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan); |
5743 | 41.8M | if (tx_relay->m_next_inv_send_time < current_time) { Branch (5743:21): [True: 4.30M, False: 37.5M]
|
5744 | 4.30M | fSendTrickle = true; |
5745 | 4.30M | if (pto->IsInboundConn()) { Branch (5745:25): [True: 1.52M, False: 2.78M]
|
5746 | 1.52M | tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); |
5747 | 2.78M | } else { |
5748 | 2.78M | tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL); |
5749 | 2.78M | } |
5750 | 4.30M | } |
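| | // Timing note: each outbound delay is drawn from an exponential
| | // distribution (a Poisson process), while all inbound peers share a
| | // single schedule via NextInvToInbounds(), so opening many inbound
| | // connections yields no extra timing samples for inferring where a
| | // transaction originated.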
5751 | | |
5752 | | // It's time to send, but the peer has asked us not to relay transactions, so drop anything queued.
5753 | 41.8M | if (fSendTrickle) { Branch (5753:21): [True: 4.30M, False: 37.5M]
|
5754 | 4.30M | LOCK(tx_relay->m_bloom_filter_mutex); |
5755 | 4.30M | if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear(); Branch (5755:25): [True: 0, False: 4.30M]
|
5756 | 4.30M | } |
5757 | | |
5758 | | // Respond to BIP35 mempool requests |
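| | // (A BIP35 "mempool" message asks us to announce our entire mempool;
| | // the reply below still honors the peer's fee filter and bloom filter.)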
5759 | 41.8M | if (fSendTrickle && tx_relay->m_send_mempool) { Branch (5759:21): [True: 4.30M, False: 37.5M]
Branch (5759:37): [True: 155, False: 4.30M]
|
5760 | 155 | auto vtxinfo = m_mempool.infoAll(); |
5761 | 155 | tx_relay->m_send_mempool = false; |
5762 | 155 | const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; |
5763 | | |
5764 | 155 | LOCK(tx_relay->m_bloom_filter_mutex); |
5765 | | |
5766 | 1.99k | for (const auto& txinfo : vtxinfo) { Branch (5766:45): [True: 1.99k, False: 155]
|
5767 | 1.99k | CInv inv{ |
5768 | 1.99k | peer->m_wtxid_relay ? MSG_WTX : MSG_TX, Branch (5768:29): [True: 1.99k, False: 0]
|
5769 | 1.99k | peer->m_wtxid_relay ? Branch (5769:29): [True: 1.99k, False: 0]
|
5770 | 1.99k | txinfo.tx->GetWitnessHash().ToUint256() : |
5771 | 1.99k | txinfo.tx->GetHash().ToUint256(), |
5772 | 1.99k | }; |
5773 | 1.99k | tx_relay->m_tx_inventory_to_send.erase(inv.hash); |
5774 | | |
5775 | | // Don't send transactions that peers will not put into their mempool |
5776 | 1.99k | if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { Branch (5776:29): [True: 89, False: 1.90k]
|
5777 | 89 | continue; |
5778 | 89 | } |
5779 | 1.90k | if (tx_relay->m_bloom_filter) { Branch (5779:29): [True: 212, False: 1.69k]
|
5780 | 212 | if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; Branch (5780:33): [True: 127, False: 85]
|
5781 | 212 | } |
5782 | 1.77k | tx_relay->m_tx_inventory_known_filter.insert(inv.hash); |
5783 | 1.77k | vInv.push_back(inv); |
5784 | 1.77k | if (vInv.size() == MAX_INV_SZ) { Branch (5784:29): [True: 0, False: 1.77k]
|
5785 | 0 | MakeAndPushMessage(*pto, NetMsgType::INV, vInv); |
5786 | 0 | vInv.clear(); |
5787 | 0 | } |
5788 | 1.77k | } |
5789 | 155 | } |
5790 | | |
5791 | | // Determine transactions to relay |
5792 | 41.8M | if (fSendTrickle) { Branch (5792:21): [True: 4.30M, False: 37.5M]
|
5793 | | // Produce a vector with all candidates for sending |
5794 | 4.30M | std::vector<std::set<uint256>::iterator> vInvTx; |
5795 | 4.30M | vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size()); |
5796 | 4.42M | for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) { Branch (5796:101): [True: 114k, False: 4.30M]
|
5797 | 114k | vInvTx.push_back(it); |
5798 | 114k | } |
5799 | 4.30M | const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; |
5800 | | // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. |
5801 | | // A heap is used so that not all items need sorting if only a few are being sent. |
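| | // (make_heap is O(n) and each pop_heap is O(log n), so sending k of
| | // n candidates costs O(n + k log n) rather than the O(n log n) of a
| | // full sort.)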
5802 | 4.30M | CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay); |
5803 | 4.30M | std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); |
5804 | | // There is no reason to drain the queue at many times the network's capacity,
5805 | | // especially since we have many peers and some will draw much shorter delays.
5806 | 4.30M | unsigned int nRelayedTransactions = 0; |
5807 | 4.30M | LOCK(tx_relay->m_bloom_filter_mutex); |
5808 | 4.30M | size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5}; |
5809 | 4.30M | broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max); |
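| | // Illustrative arithmetic: with 5,000 queued announcements the cap
| | // rises by (5000/1000)*5 = 25 invs above INVENTORY_BROADCAST_TARGET,
| | // but never exceeds INVENTORY_BROADCAST_MAX.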
5810 | 4.41M | while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) { Branch (5810:28): [True: 109k, False: 4.30M]
Branch (5810:47): [True: 108k, False: 315]
|
5811 | | // Fetch the top element from the heap |
5812 | 108k | std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); |
5813 | 108k | std::set<uint256>::iterator it = vInvTx.back(); |
5814 | 108k | vInvTx.pop_back(); |
5815 | 108k | uint256 hash = *it; |
5816 | 108k | CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash); Branch (5816:34): [True: 108k, False: 0]
|
5817 | | // Remove it from the to-be-sent set |
5818 | 108k | tx_relay->m_tx_inventory_to_send.erase(it); |
5819 | | // Check if not in the filter already |
5820 | 108k | if (tx_relay->m_tx_inventory_known_filter.contains(hash)) { Branch (5820:29): [True: 354, False: 108k]
|
5821 | 354 | continue; |
5822 | 354 | } |
5823 | | // Not in the mempool anymore? Don't bother sending it.
5824 | 108k | auto txinfo = m_mempool.info(ToGenTxid(inv)); |
5825 | 108k | if (!txinfo.tx) { Branch (5825:29): [True: 13.9k, False: 94.5k]
|
5826 | 13.9k | continue; |
5827 | 13.9k | } |
5828 | | // Peer told us not to send transactions below their feerate filter? Don't bother sending it.
5829 | 94.5k | if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { Branch (5829:29): [True: 157, False: 94.4k]
|
5830 | 157 | continue; |
5831 | 157 | } |
5832 | 94.4k | if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; Branch (5832:29): [True: 746, False: 93.6k]
Branch (5832:57): [True: 130, False: 616]
|
5833 | | // Send |
5834 | 94.2k | vInv.push_back(inv); |
5835 | 94.2k | nRelayedTransactions++; |
5836 | 94.2k | if (vInv.size() == MAX_INV_SZ) { Branch (5836:29): [True: 0, False: 94.2k]
|
5837 | 0 | MakeAndPushMessage(*pto, NetMsgType::INV, vInv); |
5838 | 0 | vInv.clear(); |
5839 | 0 | } |
5840 | 94.2k | tx_relay->m_tx_inventory_known_filter.insert(hash); |
5841 | 94.2k | } |
5842 | | |
5843 | | // Ensure we'll respond to GETDATA requests for anything we've just announced |
5844 | 4.30M | LOCK(m_mempool.cs); |
5845 | 4.30M | tx_relay->m_last_inv_sequence = m_mempool.GetSequence(); |
5846 | 4.30M | } |
5847 | 41.8M | } |
5848 | 41.8M | if (!vInv.empty()) Branch (5848:13): [True: 9.78M, False: 32.0M]
|
5849 | 9.78M | MakeAndPushMessage(*pto, NetMsgType::INV, vInv); |
5850 | | |
5851 | | // Detect whether we're stalling |
5852 | 41.8M | auto stalling_timeout = m_block_stalling_timeout.load(); |
5853 | 41.8M | if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) { Branch (5853:13): [True: 0, False: 41.8M]
Branch (5853:47): [True: 0, False: 0]
|
5854 | | // Stalling only triggers when the block download window cannot move. During normal steady state, |
5855 | | // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection |
5856 | | // should only happen during initial block download. |
5857 | 0 | LogInfo("Peer is stalling block download, %s\n", pto->DisconnectMsg(fLogIPs)); |
5858 | 0 | pto->fDisconnect = true; |
5859 | | // Increase timeout for the next peer so that we don't disconnect multiple peers if our own |
5860 | | // bandwidth is insufficient. |
5861 | 0 | const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX); |
5862 | 0 | if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { Branch (5862:17): [True: 0, False: 0]
Branch (5862:52): [True: 0, False: 0]
|
5863 | 0 | LogDebug(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout)); |
5864 | 0 | } |
5865 | 0 | return true; |
5866 | 0 | } |
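| | // The doubling above is capped at BLOCK_STALLING_TIMEOUT_MAX, and the
| | // compare_exchange only applies it if no other thread changed the
| | // shared timeout since we read it.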
5867 | | // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N) |
5868 | | // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout. |
5869 | | // We compensate for other peers to prevent killing off peers due to our own downstream link |
5870 | | // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes |
5871 | | // to unreasonably increase our timeout. |
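| | // Worked example (mainnet's 600s target spacing): with two other
| | // peers serving us validated blocks, the front-of-queue block times
| | // out after 600s * (1 + 0.5 * 2) = 1200s, i.e. 20 minutes.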
5872 | 41.8M | if (state.vBlocksInFlight.size() > 0) { Branch (5872:13): [True: 344k, False: 41.5M]
|
5873 | 344k | QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); |
5874 | 344k | int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1; |
5875 | 344k | if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { Branch (5875:17): [True: 425, False: 344k]
|
5876 | 425 | LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs)); |
5877 | 425 | pto->fDisconnect = true; |
5878 | 425 | return true; |
5879 | 425 | } |
5880 | 344k | } |
5881 | | // Check for headers sync timeouts |
5882 | 41.8M | if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) { Branch (5882:13): [True: 41.8M, False: 0]
Branch (5882:13): [True: 88.7k, False: 41.7M]
Branch (5882:35): [True: 88.7k, False: 41.7M]
|
5883 | | // Detect whether this is a stalling initial-headers-sync peer |
5884 | 88.7k | if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) { Branch (5884:17): [True: 0, False: 88.7k]
|
5885 | 0 | if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) { Branch (5885:21): [True: 0, False: 0]
Branch (5885:68): [True: 0, False: 0]
Branch (5885:89): [True: 0, False: 0]
|
5886 | | // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer, |
5887 | | // and we have others we could be using instead. |
5888 | | // Note: If all our peers are inbound, then we won't |
5889 | | // disconnect our sync peer for stalling; we have bigger |
5890 | | // problems if we can't get any outbound peers. |
5891 | 0 | if (!pto->HasPermission(NetPermissionFlags::NoBan)) { Branch (5891:25): [True: 0, False: 0]
|
5892 | 0 | LogInfo("Timeout downloading headers, %s\n", pto->DisconnectMsg(fLogIPs)); |
5893 | 0 | pto->fDisconnect = true; |
5894 | 0 | return true; |
5895 | 0 | } else { |
5896 | 0 | LogInfo("Timeout downloading headers from noban peer, not %s\n", pto->DisconnectMsg(fLogIPs)); |
5897 | | // Reset the headers sync state so that we have a |
5898 | | // chance to try downloading from a different peer. |
5899 | | // Note: this will also result in at least one more |
5900 | | // getheaders message to be sent to |
5901 | | // this peer (eventually). |
5902 | 0 | state.fSyncStarted = false; |
5903 | 0 | nSyncStarted--; |
5904 | 0 | peer->m_headers_sync_timeout = 0us; |
5905 | 0 | } |
5906 | 0 | } |
5907 | 88.7k | } else { |
5908 | | // After we've caught up once, reset the timeout so we can't trigger |
5909 | | // disconnect later. |
5910 | 88.7k | peer->m_headers_sync_timeout = std::chrono::microseconds::max(); |
5911 | 88.7k | } |
5912 | 88.7k | } |
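| | // The headers-sync timeout therefore only binds during initial sync
| | // (best header more than 24h old); once caught up, it is permanently
| | // disabled for this peer by setting it to microseconds::max().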
5913 | | |
5914 | | // Check that outbound peers have reasonable chains |
5915 | | // GetTime() is used by this anti-DoS logic so we can test this using mocktime |
5916 | 41.8M | ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>()); |
5917 | | |
5918 | | // |
5919 | | // Message: getdata (blocks) |
5920 | | // |
5921 | 41.8M | std::vector<CInv> vGetData; |
5922 | 41.8M | if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { Branch (5922:13): [True: 41.8M, False: 0]
Branch (5922:40): [True: 40.6M, False: 1.14M]
Branch (5922:77): [True: 40.6M, False: 0]
Branch (5922:103): [True: 1.14M, False: 0]
Branch (5922:144): [True: 41.8M, False: 5.89k]
|
5923 | 41.8M | std::vector<const CBlockIndex*> vToDownload; |
5924 | 41.8M | NodeId staller = -1; |
5925 | 41.8M | auto get_inflight_budget = [&state]() { |
5926 | 41.8M | return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size())); |
5927 | 41.8M | }; |
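| | // The budget is re-evaluated before each download source below, so
| | // snapshot-chain and background-chain requests together never exceed
| | // this peer's MAX_BLOCKS_IN_TRANSIT_PER_PEER slots.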
5928 | | |
5929 | | // If a snapshot chainstate is in use, we want to find its next blocks
5930 | | // before those of the background chainstate, to prioritize getting to the network tip.
5931 | 41.8M | FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller); |
5932 | 41.8M | if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) { Branch (5932:17): [True: 0, False: 41.8M]
Branch (5932:58): [True: 0, False: 0]
|
5933 | | // If the background tip is not an ancestor of the snapshot block, |
5934 | | // we need to start requesting blocks from their last common ancestor. |
5935 | 0 | const CBlockIndex *from_tip = LastCommonAncestor(m_chainman.GetBackgroundSyncTip(), m_chainman.GetSnapshotBaseBlock()); |
5936 | 0 | TryDownloadingHistoricalBlocks( |
5937 | 0 | *peer, |
5938 | 0 | get_inflight_budget(), |
5939 | 0 | vToDownload, from_tip, |
5940 | 0 | Assert(m_chainman.GetSnapshotBaseBlock())); |
5941 | 0 | } |
5942 | 41.8M | for (const CBlockIndex *pindex : vToDownload) { Branch (5942:44): [True: 6.87k, False: 41.8M]
|
5943 | 6.87k | uint32_t nFetchFlags = GetFetchFlags(*peer); |
5944 | 6.87k | vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()); |
5945 | 6.87k | BlockRequested(pto->GetId(), *pindex); |
5946 | 6.87k | LogDebug(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), |
5947 | 6.87k | pindex->nHeight, pto->GetId()); |
5948 | 6.87k | } |
5949 | 41.8M | if (state.vBlocksInFlight.empty() && staller != -1) { Branch (5949:17): [True: 41.4M, False: 341k]
Branch (5949:50): [True: 0, False: 41.4M]
|
5950 | 0 | if (State(staller)->m_stalling_since == 0us) { Branch (5950:21): [True: 0, False: 0]
|
5951 | 0 | State(staller)->m_stalling_since = current_time; |
5952 | 0 | LogDebug(BCLog::NET, "Stall started peer=%d\n", staller); |
5953 | 0 | } |
5954 | 0 | } |
5955 | 41.8M | } |
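| | // A "staller" is the peer whose missing block keeps the download
| | // window from advancing; the timestamp set above feeds the stalling
| | // disconnection check earlier in this function on a later pass.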
5956 | | |
5957 | | // |
5958 | | // Message: getdata (transactions) |
5959 | | // |
5960 | 41.8M | { |
5961 | 41.8M | LOCK(m_tx_download_mutex); |
5962 | 41.8M | for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(pto->GetId(), current_time)) { Branch (5962:39): [True: 310k, False: 41.8M]
|
5963 | 310k | vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash()); Branch (5963:39): [True: 189k, False: 120k]
|
5964 | 310k | if (vGetData.size() >= MAX_GETDATA_SZ) { Branch (5964:21): [True: 0, False: 310k]
|
5965 | 0 | MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); |
5966 | 0 | vGetData.clear(); |
5967 | 0 | } |
5968 | 310k | } |
5969 | 41.8M | } |
5970 | | |
5971 | 41.8M | if (!vGetData.empty()) Branch (5971:13): [True: 188k, False: 41.6M]
|
5972 | 188k | MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); |
5973 | 41.8M | } // release cs_main |
5974 | 0 | MaybeSendFeefilter(*pto, *peer, current_time); |
5975 | 41.8M | return true; |
5976 | 41.8M | } |