Line data Source code
1 : // Copyright (c) 2009-2010 Satoshi Nakamoto
2 : // Copyright (c) 2009-2022 The Bitcoin Core developers
3 : // Distributed under the MIT software license, see the accompanying
4 : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 :
6 : #include <net_processing.h>
7 :
8 : #include <addrman.h>
9 : #include <banman.h>
10 : #include <blockencodings.h>
11 : #include <blockfilter.h>
12 : #include <chainparams.h>
13 : #include <consensus/amount.h>
14 : #include <consensus/validation.h>
15 : #include <deploymentstatus.h>
16 : #include <hash.h>
17 2 : #include <headerssync.h>
18 2 : #include <index/blockfilterindex.h>
19 : #include <kernel/mempool_entry.h>
20 : #include <logging.h>
21 : #include <merkleblock.h>
22 : #include <netbase.h>
23 : #include <netmessagemaker.h>
24 : #include <node/blockstorage.h>
25 : #include <node/txreconciliation.h>
26 : #include <policy/fees.h>
27 : #include <policy/policy.h>
28 : #include <policy/settings.h>
29 : #include <primitives/block.h>
30 : #include <primitives/transaction.h>
31 : #include <random.h>
32 : #include <reverse_iterator.h>
33 : #include <scheduler.h>
34 : #include <streams.h>
35 : #include <sync.h>
36 : #include <timedata.h>
37 : #include <tinyformat.h>
38 : #include <txmempool.h>
39 : #include <txorphanage.h>
40 : #include <txrequest.h>
41 : #include <util/check.h> // For NDEBUG compile time check
42 : #include <util/strencodings.h>
43 : #include <util/trace.h>
44 : #include <validation.h>
45 :
46 : #include <algorithm>
47 : #include <atomic>
48 : #include <chrono>
49 : #include <future>
50 : #include <memory>
51 : #include <optional>
52 : #include <typeinfo>
53 :
54 : /** Headers download timeout.
55 : * Timeout = base + per_header * (expected number of headers) */
56 : static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
57 : static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
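// A rough worked example of the formula above, using a hypothetical header count:
// expecting ~600,000 headers gives 15 min + 600,000 * 1 ms = 15 min + 600 s = 25 min.
// (The actual expected header count is computed elsewhere in this file; the figure
// here is only illustrative.)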
58 : /** How long to wait for a peer to respond to a getheaders request */
59 : static constexpr auto HEADERS_RESPONSE_TIME{2min};
60 : /** Protect at least this many outbound peers from disconnection due to a slow or
61 : * lagging headers chain.
62 : */
63 : static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
64 : /** Timeout for (unprotected) outbound peers to sync to our chainwork */
65 : static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
66 : /** How frequently to check for stale tips */
67 : static constexpr auto STALE_CHECK_INTERVAL{10min};
68 : /** How frequently to check for extra outbound peers and disconnect */
69 : static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
70 : /** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict */
71 : static constexpr auto MINIMUM_CONNECT_TIME{30s};
72 : /** SHA256("main address relay")[0:8] */
73 : static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
74 2 : /// Age after which a stale block will no longer be served if requested as
75 : /// protection against fingerprinting. Set to one month, denominated in seconds.
76 : static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
77 : /// Age after which a block is considered historical for purposes of rate
78 : /// limiting block relay. Set to one week, denominated in seconds.
79 : static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
80 : /** Time between pings automatically sent out for latency probing and keepalive */
81 : static constexpr auto PING_INTERVAL{2min};
82 : /** The maximum number of entries in a locator */
83 2 : static const unsigned int MAX_LOCATOR_SZ = 101;
84 : /** The maximum number of entries in an 'inv' protocol message */
85 : static const unsigned int MAX_INV_SZ = 50000;
86 : /** Maximum number of in-flight transaction requests from a peer. It is not a hard limit, but the threshold at which
87 : * the OVERLOADED_PEER_TX_DELAY kicks in. */
88 : static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
89 : /** Maximum number of transactions to consider for requesting, per peer. It provides a reasonable DoS limit to
90 : * per-peer memory usage spent on announcements, while covering peers continuously sending INVs at the maximum
91 : * rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for several minutes, while not receiving
92 : * the actual transaction (from any peer) in response to requests for them. */
93 : static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
94 : /** How long to delay requesting transactions via txids, if we have wtxid-relaying peers */
95 : static constexpr auto TXID_RELAY_DELAY{2s};
96 : /** How long to delay requesting transactions from non-preferred peers */
97 : static constexpr auto NONPREF_PEER_TX_DELAY{2s};
98 : /** How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT). */
99 : static constexpr auto OVERLOADED_PEER_TX_DELAY{2s};
100 : /** How long to wait before downloading a transaction from an additional peer */
101 : static constexpr auto GETDATA_TX_INTERVAL{60s};
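// Illustrative combination of the delays above (assuming the request scheduler adds
// them together, as the names suggest): a txid-based announcement from a non-preferred,
// overloaded peer, while we have wtxid-relay peers, could be delayed by up to
// TXID_RELAY_DELAY + NONPREF_PEER_TX_DELAY + OVERLOADED_PEER_TX_DELAY = 6s before we
// consider requesting it.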
102 : /** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */
103 : static const unsigned int MAX_GETDATA_SZ = 1000;
104 : /** Number of blocks that can be requested at any given time from a single peer. */
105 : static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
106 : /** Default time during which a peer must stall block download progress before being disconnected.
107 : * The actual timeout is increased temporarily if peers are disconnected for hitting the timeout */
108 : static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
109 : /** Maximum timeout for stalling block download. */
110 : static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
111 : /** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
112 : * fewer than this number, we have reached its tip. Changing this value is a protocol upgrade. */
113 : static const unsigned int MAX_HEADERS_RESULTS = 2000;
114 : /** Maximum depth of blocks we're willing to serve as compact blocks to peers
115 : * when requested. For older blocks, a regular BLOCK response will be sent. */
116 : static const int MAX_CMPCTBLOCK_DEPTH = 5;
117 : /** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */
118 : static const int MAX_BLOCKTXN_DEPTH = 10;
119 : /** Size of the "block download window": how far ahead of our current height do we fetch?
120 : * Larger windows tolerate larger download speed differences between peers, but increase the potential
121 : * degree of disordering of blocks on disk (which makes reindexing and pruning harder). We'll probably
122 : * want to make this a per-peer adaptive value at some point. */
123 : static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
124 : /** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */
125 : static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
126 : /** Additional block download timeout per parallel downloading peer (i.e. 5 min) */
127 : static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
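// Rough illustration of how these combine (the exact check lives in the block request
// timeout logic later in this file): with two other peers downloading in parallel, a
// block request would time out after about (1 + 0.5 * 2) * 10 min = 20 min.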
128 : /** Maximum number of headers to announce when relaying blocks with headers message.*/
129 : static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
130 : /** Maximum number of unconnecting headers announcements before applying a DoS score */
131 : static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
132 : /** Minimum blocks required to signal NODE_NETWORK_LIMITED */
133 : static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
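// For scale: 288 blocks is roughly two days' worth at the 10-minute target spacing
// (288 * 10 min = 48 h), the serving window NODE_NETWORK_LIMITED peers commit to
// under BIP 159.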
134 : /** Average delay between local address broadcasts */
135 : static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
136 : /** Average delay between peer address broadcasts */
137 : static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
138 : /** Delay between rotating the peers we relay a particular address to */
139 : static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
140 : /** Average delay between trickled inventory transmissions for inbound peers.
141 : * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */
142 : static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
143 : /** Average delay between trickled inventory transmissions for outbound peers.
144 : * Use a smaller delay as there is less privacy concern for them.
145 : * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */
146 : static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s};
147 : /** Maximum rate of inventory items to send per second.
148 : * Limits the impact of low-fee transaction floods. */
149 : static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
150 : /** Target number of tx inventory items to send per transmission. */
151 : static constexpr unsigned int INVENTORY_BROADCAST_TARGET = INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
152 : /** Maximum number of inventory items to send per transmission. */
153 : static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000;
154 : static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low");
155 : static_assert(INVENTORY_BROADCAST_MAX <= MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high");
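// With the constants above, INVENTORY_BROADCAST_TARGET works out to
// 7 items/s * 5 s = 35 items per inbound trickle, comfortably inside the
// INVENTORY_BROADCAST_MAX (1000) and MAX_PEER_TX_ANNOUNCEMENTS (5000) bounds
// checked by the static_asserts above.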
156 : /** Average delay between feefilter broadcasts in seconds. */
157 : static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
158 : /** Maximum feefilter broadcast delay after significant change. */
159 : static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
160 : /** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
161 : static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
162 : /** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
163 : static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
164 : /** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */
165 : static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
166 : /** The maximum number of address records permitted in an ADDR message. */
167 : static constexpr size_t MAX_ADDR_TO_SEND{1000};
168 : /** The maximum rate of address records we're willing to process on average. Can be bypassed using
169 : * the NetPermissionFlags::Addr permission. */
170 : static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
171 : /** The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND
172 : * based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR
173 : * is exempt from this limit). */
174 : static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND};
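// Sketch of the token-bucket behaviour described above (illustrative numbers):
// refilling at MAX_ADDR_RATE_PER_SECOND = 0.1 addr/s, an empty bucket takes about
// 1000 / 0.1 = 10,000 s (~2.8 h) to reach its MAX_ADDR_PROCESSING_TOKEN_BUCKET soft
// cap, while the MAX_ADDR_TO_SEND top-up after our own GETADDR bypasses that cap.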
175 : /** The compactblocks version we support. See BIP 152. */
176 : static constexpr uint64_t CMPCTBLOCKS_VERSION{2};
177 :
178 : // Internal stuff
179 : namespace {
180 : /** Blocks that are in flight, and that are in the queue to be downloaded. */
181 : struct QueuedBlock {
182 : /** BlockIndex. We must have this since we only request blocks when we've already validated the header. */
183 : const CBlockIndex* pindex;
184 : /** Optional, used for CMPCTBLOCK downloads */
185 : std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
186 : };
187 :
188 : /**
189 : * Data structure for an individual peer. This struct is not protected by
190 : * cs_main since it does not contain validation-critical data.
191 : *
192 : * Memory is owned by shared pointers and this object is destructed when
193 : * the refcount drops to zero.
194 : *
195 : * Mutexes inside this struct must not be held when locking m_peer_mutex.
196 : *
197 : * TODO: move most members from CNodeState to this structure.
198 : * TODO: move remaining application-layer data members from CNode to this structure.
199 : */
200 : struct Peer {
201 : /** Same id as the CNode object for this peer */
202 : const NodeId m_id{0};
203 :
204 : /** Services we offered to this peer.
205 : *
206 : * This is supplied by CConnman during peer initialization. It's const
207 : * because there is no protocol defined for renegotiating services
208 : * initially offered to a peer. The set of local services we offer should
209 : * not change after initialization.
210 : *
211 : * An interesting example of this is NODE_NETWORK and initial block
212 : * download: a node which starts up from scratch doesn't have any blocks
213 : * to serve, but still advertises NODE_NETWORK because it will eventually
214 : * fulfill this role after IBD completes. P2P code is written in such a
215 : * way that it can gracefully handle peers who don't make good on their
216 : * service advertisements. */
217 : const ServiceFlags m_our_services;
218 : /** Services this peer offered to us. */
219 0 : std::atomic<ServiceFlags> m_their_services{NODE_NONE};
220 :
221 : /** Protects misbehavior data members */
222 : Mutex m_misbehavior_mutex;
223 : /** Accumulated misbehavior score for this peer */
224 0 : int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
225 : /** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */
226 0 : bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
227 :
228 : /** Protects block inventory data members */
229 : Mutex m_block_inv_mutex;
230 : /** List of blocks that we'll announce via an `inv` message.
231 : * There is no final sorting before sending, as they are always sent
232 : * immediately and in the order requested. */
233 : std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
234 : /** Unfiltered list of blocks that we'd like to announce via a `headers`
235 : * message. If we can't announce via a `headers` message, we'll fall back to
236 : * announcing via `inv`. */
237 : std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
238 : /** The final block hash that we sent in an `inv` message to this peer.
239 : * When the peer requests this block, we send an `inv` message to trigger
240 : * the peer to request the next sequence of block hashes.
241 : * Most peers use headers-first syncing, which doesn't use this mechanism */
242 0 : uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};
243 :
244 : /** This peer's reported block height when we connected */
245 0 : std::atomic<int> m_starting_height{-1};
246 :
247 : /** The pong reply we're expecting, or 0 if no pong expected. */
248 0 : std::atomic<uint64_t> m_ping_nonce_sent{0};
249 : /** When the last ping was sent, or 0 if no ping was ever sent */
250 0 : std::atomic<std::chrono::microseconds> m_ping_start{0us};
251 : /** Whether a ping has been requested by the user */
252 0 : std::atomic<bool> m_ping_queued{false};
253 :
254 : /** Whether this peer relays txs via wtxid */
255 0 : std::atomic<bool> m_wtxid_relay{false};
256 : /** The feerate in the most recent BIP133 `feefilter` message sent to the peer.
257 : * It is *not* a p2p protocol violation for the peer to send us
258 : * transactions with a lower fee rate than this. See BIP133. */
259 0 : CAmount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
260 : /** Timestamp after which we will send the next BIP133 `feefilter` message
261 : * to the peer. */
262 0 : std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
263 :
264 0 : struct TxRelay {
265 : mutable RecursiveMutex m_bloom_filter_mutex;
266 : /** Whether we relay transactions to this peer. */
267 0 : bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
268 : /** A bloom filter for which transactions to announce to the peer. See BIP37. */
269 0 : std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};
270 :
271 : mutable RecursiveMutex m_tx_inventory_mutex;
272 : /** A filter of all the (w)txids that the peer has announced to
273 : * us or we have announced to the peer. We use this to avoid announcing
274 : * the same (w)txid to a peer that already has the transaction. */
275 0 : CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
276 : /** Set of transaction ids we still have to announce (txid for
277 : * non-wtxid-relay peers, wtxid for wtxid-relay peers). We use the
278 : * mempool to sort transactions in dependency order before relay, so
279 : * this does not have to be sorted. */
280 : std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
281 : /** Whether the peer has requested us to send our complete mempool. Only
282 : * permitted if the peer has NetPermissionFlags::Mempool or we advertise
283 : * NODE_BLOOM. See BIP35. */
284 0 : bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
285 : /** The next time after which we will send an `inv` message containing
286 : * transaction announcements to this peer. */
287 0 : std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
288 : /** The mempool sequence num at which we sent the last `inv` message to this peer.
289 : * Can relay txs with lower sequence numbers than this (see CTxMempool::info_for_relay). */
290 0 : uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1};
291 :
292 : /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */
293 0 : std::atomic<CAmount> m_fee_filter_received{0};
294 : };
295 :
296 : /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */
297 0 : TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
298 : {
299 0 : LOCK(m_tx_relay_mutex);
300 0 : Assume(!m_tx_relay);
301 0 : m_tx_relay = std::make_unique<Peer::TxRelay>();
302 0 : return m_tx_relay.get();
303 0 : };
304 :
305 0 : TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
306 : {
307 0 : return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
308 : };
309 :
310 : /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
311 : std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
312 : /** Probabilistic filter to track recent addr messages relayed with this
313 : * peer. Used to avoid relaying redundant addresses to this peer.
314 : *
315 : * We initialize this filter for outbound peers (other than
316 : * block-relay-only connections) or when an inbound peer sends us an
317 : * address related message (ADDR, ADDRV2, GETADDR).
318 : *
319 : * Presence of this filter must correlate with m_addr_relay_enabled.
320 : **/
321 : std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
322 : /** Whether we are participating in address relay with this connection.
323 : *
324 : * We set this bool to true for outbound peers (other than
325 : * block-relay-only connections), or when an inbound peer sends us an
326 : * address related message (ADDR, ADDRV2, GETADDR).
327 : *
328 : * We use this bool to decide whether a peer is eligible for gossiping
329 : * addr messages. This avoids relaying to peers that are unlikely to
330 : * forward them, effectively blackholing self announcements. Reasons
331 : * peers might not support addr relay on the link include that they connected
332 : * to us as a block-relay-only peer or they are a light client.
333 : *
334 : * This field must correlate with whether m_addr_known has been
335 : * initialized.*/
336 0 : std::atomic_bool m_addr_relay_enabled{false};
337 : /** Whether a getaddr request to this peer is outstanding. */
338 0 : bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
339 : /** Guards address sending timers. */
340 : mutable Mutex m_addr_send_times_mutex;
341 : /** Time point to send the next ADDR message to this peer. */
342 0 : std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
343 : /** Time point to possibly re-announce our local address to this peer. */
344 0 : std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
345 : /** Whether the peer has signaled support for receiving ADDRv2 (BIP155)
346 : * messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */
347 0 : std::atomic_bool m_wants_addrv2{false};
348 : /** Whether this peer has already sent us a getaddr message. */
349 0 : bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
350 : /** Number of addresses that can be processed from this peer. Start at 1 to
351 : * permit self-announcement. */
352 0 : double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0};
353 : /** When m_addr_token_bucket was last updated */
354 0 : std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()};
355 : /** Total number of addresses that were dropped due to rate limiting. */
356 0 : std::atomic<uint64_t> m_addr_rate_limited{0};
357 : /** Total number of addresses that were processed (excludes rate-limited ones). */
358 0 : std::atomic<uint64_t> m_addr_processed{0};
359 :
360 : /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */
361 0 : bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
362 :
363 : /** Protects m_getdata_requests **/
364 : Mutex m_getdata_requests_mutex;
365 : /** Work queue of items requested by this peer **/
366 : std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
367 :
368 : /** Time of the last getheaders message to this peer */
369 0 : NodeClock::time_point m_last_getheaders_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){};
370 :
371 : /** Protects m_headers_sync **/
372 : Mutex m_headers_sync_mutex;
373 : /** Headers-sync state for this peer (eg for initial sync, or syncing large
374 : * reorgs) **/
375 0 : std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};
376 :
377 : /** Whether we've sent our peer a sendheaders message. **/
378 0 : std::atomic<bool> m_sent_sendheaders{false};
379 :
380 : /** Length of current-streak of unconnecting headers announcements */
381 0 : int m_num_unconnecting_headers_msgs GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
382 :
383 : /** When to potentially disconnect peer for stalling headers download */
384 0 : std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};
385 :
386 : /** Whether this peer wants invs or headers (when possible) for block announcements */
387 0 : bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
388 :
389 0 : explicit Peer(NodeId id, ServiceFlags our_services)
390 0 : : m_id{id}
391 0 : , m_our_services{our_services}
392 0 : {}
393 :
394 : private:
395 : mutable Mutex m_tx_relay_mutex;
396 :
397 : /** Transaction relay data. May be a nullptr. */
398 : std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
399 : };
400 :
401 : using PeerRef = std::shared_ptr<Peer>;
402 :
403 : /**
404 : * Maintain validation-specific state about nodes, protected by cs_main, instead
405 : * of by CNode's own locks. This simplifies asynchronous operation, where
406 : * processing of incoming data is done after the ProcessMessage call returns,
407 : * and we're no longer holding the node's locks.
408 : */
409 : struct CNodeState {
410 : //! The best known block we know this peer has announced.
411 0 : const CBlockIndex* pindexBestKnownBlock{nullptr};
412 : //! The hash of the last unknown block this peer has announced.
413 0 : uint256 hashLastUnknownBlock{};
414 : //! The last full block we both have.
415 0 : const CBlockIndex* pindexLastCommonBlock{nullptr};
416 : //! The best header we have sent our peer.
417 0 : const CBlockIndex* pindexBestHeaderSent{nullptr};
418 : //! Whether we've started headers synchronization with this peer.
419 0 : bool fSyncStarted{false};
420 : //! Since when we're stalling block download progress (in microseconds), or 0.
421 0 : std::chrono::microseconds m_stalling_since{0us};
422 : std::list<QueuedBlock> vBlocksInFlight;
423 : //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
424 0 : std::chrono::microseconds m_downloading_since{0us};
425 : //! Whether we consider this a preferred download peer.
426 0 : bool fPreferredDownload{false};
427 : /** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */
428 0 : bool m_requested_hb_cmpctblocks{false};
429 : /** Whether this peer will send us cmpctblocks if we request them. */
430 0 : bool m_provides_cmpctblocks{false};
431 :
432 : /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic.
433 : *
434 : * Both are only in effect for outbound, non-manual, non-protected connections.
435 : * Any peer protected (m_protect = true) is not chosen for eviction. A peer is
436 : * marked as protected if all of these are true:
437 : * - its connection type is IsBlockOnlyConn() == false
438 : * - it gave us a valid connecting header
439 : * - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet
440 : * - its chain tip has at least as much work as ours
441 : *
442 : * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip,
443 : * set a timeout CHAIN_SYNC_TIMEOUT in the future:
444 : * - If at timeout their best known block now has more work than our tip
445 : * when the timeout was set, then either reset the timeout or clear it
446 : * (after comparing against our current tip's work)
447 : * - If at timeout their best known block still has less work than our
448 : * tip did when the timeout was set, then send a getheaders message,
449 : * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future.
450 : * If their best known block is still behind when that new timeout is
451 : * reached, disconnect.
452 : *
453 : * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers,
454 : * drop the outbound one that least recently announced us a new block.
455 : */
456 0 : struct ChainSyncTimeoutState {
457 : //! A timeout used for checking whether our peer has sufficiently synced
458 0 : std::chrono::seconds m_timeout{0s};
459 : //! A header with the work we require on our peer's chain
460 0 : const CBlockIndex* m_work_header{nullptr};
461 : //! After timeout is reached, set to true after sending getheaders
462 0 : bool m_sent_getheaders{false};
463 : //! Whether this peer is protected from disconnection due to a bad/slow chain
464 0 : bool m_protect{false};
465 : };
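// Rough timeline of the CHAIN_SYNC_TIMEOUT logic documented above (illustrative,
// using the constants defined near the top of this file):
//   t0           peer's best known block has less work than our tip -> m_timeout = t0 + 20 min
//   t0 + 20 min  still behind -> send getheaders, set m_sent_getheaders, and shorten
//                m_timeout by HEADERS_RESPONSE_TIME (2 min)
//   t0 + 22 min  still behind -> disconnect (unless m_protect is set)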
466 :
467 : ChainSyncTimeoutState m_chain_sync;
468 :
469 : //! Time of last new block announcement
470 0 : int64_t m_last_block_announcement{0};
471 :
472 : //! Whether this peer is an inbound connection
473 : const bool m_is_inbound;
474 :
475 0 : CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
476 : };
477 :
478 : class PeerManagerImpl final : public PeerManager
479 : {
480 : public:
481 : PeerManagerImpl(CConnman& connman, AddrMan& addrman,
482 : BanMan* banman, ChainstateManager& chainman,
483 : CTxMemPool& pool, Options opts);
484 :
485 : /** Overridden from CValidationInterface. */
486 : void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
487 : EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
488 : void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
489 : EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
490 : void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
491 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
492 : void BlockChecked(const CBlock& block, const BlockValidationState& state) override
493 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
494 : void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override
495 : EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
496 :
497 : /** Implement NetEventsInterface */
498 : void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
499 : void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex);
500 : bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
501 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
502 : bool SendMessages(CNode* pto) override
503 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, g_msgproc_mutex);
504 :
505 : /** Implement PeerManager */
506 : void StartScheduledTasks(CScheduler& scheduler) override;
507 : void CheckForStaleTipAndEvictPeers() override;
508 : std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override
509 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
510 : bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
511 0 : bool IgnoresIncomingTxs() override { return m_opts.ignore_incoming_txs; }
512 : void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
513 : void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
514 0 : void SetBestHeight(int height) override { m_best_height = height; };
515 0 : void UnitTestMisbehaving(NodeId peer_id, int howmuch) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, ""); };
516 : void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
517 : const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
518 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
519 : void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;
520 :
521 : private:
522 : /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
523 : void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);
524 :
525 : /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
526 : void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
527 :
528 : /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */
529 : void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
530 :
531 : /** Get a shared pointer to the Peer object.
532 : * May return an empty shared_ptr if the Peer object can't be found. */
533 : PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
534 :
535 : /** Get a shared pointer to the Peer object and remove it from m_peer_map.
536 : * May return an empty shared_ptr if the Peer object can't be found. */
537 : PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
538 :
539 : /**
540 : * Increment peer's misbehavior score. If the new value >= DISCOURAGEMENT_THRESHOLD, mark the node
541 : * to be discouraged, meaning the peer might be disconnected and added to the discouragement filter.
542 : */
543 : void Misbehaving(Peer& peer, int howmuch, const std::string& message);
544 :
545 : /**
546 : * Potentially mark a node discouraged based on the contents of a BlockValidationState object
547 : *
548 : * @param[in] via_compact_block this bool is passed in because net_processing should
549 : * punish peers differently depending on whether the data was provided in a compact
550 : * block message or not. If the compact block had a valid header, but contained invalid
551 : * txs, the peer should not be punished. See BIP 152.
552 : *
553 : * @return Returns true if the peer was punished (probably disconnected)
554 : */
555 : bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
556 : bool via_compact_block, const std::string& message = "")
557 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
558 :
559 : /**
560 : * Potentially disconnect and discourage a node based on the contents of a TxValidationState object
561 : *
562 : * @return Returns true if the peer was punished (probably disconnected)
563 : */
564 : bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
565 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
566 :
567 : /** Maybe disconnect a peer and discourage future connections from its address.
568 : *
569 : * @param[in] pnode The node to check.
570 : * @param[in] peer The peer object to check.
571 : * @return True if the peer was marked for disconnection in this function
572 : */
573 : bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
574 :
575 : /**
576 : * Reconsider orphan transactions after a parent has been accepted to the mempool.
577 : *
578 : * @param[in] peer The peer whose orphan transactions we will reconsider. Generally only
579 : * one orphan will be reconsidered on each call of this function. If an
580 : * accepted orphan has orphaned children, those will need to be
581 : * reconsidered, creating more work, possibly for other peers.
582 : * @return True if meaningful work was done (an orphan was accepted/rejected).
583 : * If no meaningful work was done, then the work set for this peer
584 : * will be empty.
585 : */
586 : bool ProcessOrphanTx(Peer& peer)
587 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
588 :
589 : /** Process a single headers message from a peer.
590 : *
591 : * @param[in] pfrom CNode of the peer
592 : * @param[in] peer The peer sending us the headers
593 : * @param[in] headers The headers received. Note that this may be modified within ProcessHeadersMessage.
594 : * @param[in] via_compact_block Whether this header came in via compact block handling.
595 : */
596 : void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
597 : std::vector<CBlockHeader>&& headers,
598 : bool via_compact_block)
599 : EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
600 : /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */
601 : /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */
602 : bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
603 : /** Calculate an anti-DoS work threshold for headers chains */
604 : arith_uint256 GetAntiDoSWorkThreshold();
605 : /** Deal with state tracking and headers sync for peers that send the
606 : * occasional non-connecting header (this can happen due to BIP 130 headers
607 : * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */
608 : void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
609 : /** Return true if the headers connect to each other, false otherwise */
610 : bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
611 : /** Try to continue a low-work headers sync that has already begun.
612 : * Assumes the caller has already verified the headers connect, and has
613 : * checked that each header satisfies the proof-of-work target included in
614 : * the header.
615 : * @param[in] peer The peer we're syncing with.
616 : * @param[in] pfrom CNode of the peer
617 : * @param[in,out] headers The headers to be processed.
618 : * @return True if the passed in headers were successfully processed
619 : * as the continuation of a low-work headers sync in progress;
620 : * false otherwise.
621 : * If false, the passed-in headers will be returned to
622 : * the caller.
623 : * If true, the returned headers may be empty, indicating
624 : * there is no more work for the caller to do; or the headers
625 : * may be populated with entries that have passed anti-DoS
626 : * checks (and therefore may be validated for block index
627 : * acceptance by the caller).
628 : */
629 : bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
630 : std::vector<CBlockHeader>& headers)
631 : EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
632 : /** Check work on a headers chain to be processed, and if insufficient,
633 : * initiate our anti-DoS headers sync mechanism.
634 : *
635 : * @param[in] peer The peer whose headers we're processing.
636 : * @param[in] pfrom CNode of the peer
637 : * @param[in] chain_start_header Where these headers connect in our index.
638 : * @param[in,out] headers The headers to be processed.
639 : *
640 : * @return True if the chain was low-work (headers will be empty after
641 : * calling); false otherwise.
642 : */
643 : bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
644 : const CBlockIndex* chain_start_header,
645 : std::vector<CBlockHeader>& headers)
646 : EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
647 :
648 : /** Return true if the given header is an ancestor of
649 : * m_chainman.m_best_header or our current tip */
650 : bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
651 :
652 : /** Request further headers from this peer with a given locator.
653 : * We don't issue a getheaders message if we have a recent one outstanding.
654 : * This returns true if a getheaders is actually sent, and false otherwise.
655 : */
656 : bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
657 : /** Potentially fetch blocks from this peer upon receipt of a new headers tip */
658 : void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
659 : /** Update peer state based on received headers message */
660 : void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
661 : EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
662 :
663 : void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
664 :
665 : /** Register with TxRequestTracker that an INV has been received from a
666 : * peer. The announcement parameters are decided in PeerManager and then
667 : * passed to TxRequestTracker. */
668 : void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
669 : EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
670 :
671 : /** Send a version message to a peer */
672 : void PushNodeVersion(CNode& pnode, const Peer& peer);
673 :
674 : /** Send a ping message every PING_INTERVAL or if requested via RPC. May
675 : * mark the peer to be disconnected if a ping has timed out.
676 : * We use mockable time for ping timeouts, so setmocktime may cause pings
677 : * to time out. */
678 : void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
679 :
680 : /** Send `addr` messages on a regular schedule. */
681 : void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
682 :
683 : /** Send a single `sendheaders` message, after we have completed headers sync with a peer. */
684 : void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
685 :
686 : /** Relay (gossip) an address to a few randomly chosen nodes.
687 : *
688 : * @param[in] originator The id of the peer that sent us the address. We don't want to relay it back.
689 : * @param[in] addr Address to relay.
690 : * @param[in] fReachable Whether the address' network is reachable. We relay unreachable
691 : * addresses less.
692 : */
693 : void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
694 :
695 : /** Send `feefilter` message. */
696 : void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
697 :
698 : const CChainParams& m_chainparams;
699 : CConnman& m_connman;
700 : AddrMan& m_addrman;
701 : /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
702 : BanMan* const m_banman;
703 : ChainstateManager& m_chainman;
704 : CTxMemPool& m_mempool;
705 : TxRequestTracker m_txrequest GUARDED_BY(::cs_main);
706 : std::unique_ptr<TxReconciliationTracker> m_txreconciliation;
707 :
708 : /** The height of the best chain */
709 1 : std::atomic<int> m_best_height{-1};
710 :
711 : /** Next time to check for stale tip */
712 1 : std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};
713 :
714 : const Options m_opts;
715 :
716 : bool RejectIncomingTxs(const CNode& peer) const;
717 :
718 : /** Whether we've completed initial sync yet, for determining when to turn
719 : * on extra block-relay-only peers. */
720 1 : bool m_initial_sync_finished GUARDED_BY(cs_main){false};
721 :
722 : /** Protects m_peer_map. This mutex must not be locked while holding a lock
723 : * on any of the mutexes inside a Peer object. */
724 : mutable Mutex m_peer_mutex;
725 : /**
726 : * Map of all Peer objects, keyed by peer id. This map is protected
727 : * by the m_peer_mutex. Once a shared pointer reference is
728 : * taken, the lock may be released. Individual fields are protected by
729 : * their own locks.
730 : */
731 : std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
732 :
733 : /** Map maintaining per-node state. */
734 : std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);
735 :
736 : /** Get a pointer to a const CNodeState, used when not mutating the CNodeState object. */
737 : const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
738 : /** Get a pointer to a mutable CNodeState. */
739 : CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
740 :
741 : uint32_t GetFetchFlags(const Peer& peer) const;
742 :
743 1 : std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
744 :
745 : /** Number of nodes with fSyncStarted. */
746 1 : int nSyncStarted GUARDED_BY(cs_main) = 0;
747 :
748 : /** Hash of the last block we received via INV */
749 1 : uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};
750 :
751 : /**
752 : * Sources of received blocks, saved to be able to punish them when processing
753 : * happens afterwards.
754 : * Set mapBlockSource[hash].second to false if the node should not be
755 : * punished if the block is invalid.
756 : */
757 : std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
758 :
759 : /** Number of peers with wtxid relay. */
760 1 : std::atomic<int> m_wtxid_relay_peers{0};
761 :
762 : /** Number of outbound peers with m_chain_sync.m_protect. */
763 1 : int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
764 :
765 : /** Number of preferable block download peers. */
766 1 : int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
767 :
768 : /** Stalling timeout for blocks in IBD */
769 1 : std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT};
770 :
771 : bool AlreadyHaveTx(const GenTxid& gtxid)
772 : EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);
773 :
774 : /**
775 : * Filter for transactions that were recently rejected by the mempool.
776 : * These are not rerequested until the chain tip changes, at which point
777 : * the entire filter is reset.
778 : *
779 : * Without this filter we'd be re-requesting txs from each of our peers,
780 : * increasing bandwidth consumption considerably. For instance, with 100
781 : * peers, half of which relay a tx we don't accept, that might be a 50x
782 : * bandwidth increase. A flooding attacker attempting to roll-over the
783 : * filter using minimum-sized, 60byte, transactions might manage to send
784 : * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
785 : * two minute window to send invs to us.
786 : *
787 : * Decreasing the false positive rate is fairly cheap, so we pick one in a
788 : * million to make it highly unlikely for users to have issues with this
789 : * filter.
790 : *
791 : * We typically only add wtxids to this filter. For non-segwit
792 : * transactions, the txid == wtxid, so this only prevents us from
793 : * re-downloading non-segwit transactions when communicating with
794 : * non-wtxidrelay peers -- which is important for avoiding malleation
795 : * attacks that could otherwise interfere with transaction relay from
796 : * non-wtxidrelay peers. For communicating with wtxidrelay peers, having
797 : * the reject filter store wtxids is exactly what we want, to avoid
798 : * redownloading a rejected transaction.
799 : *
800 : * In cases where we can tell that a segwit transaction will fail
801 : * validation no matter the witness, we may add the txid of such
802 : * transaction to the filter as well. This can be helpful when
803 : * communicating with txid-relay peers or if we were to otherwise fetch a
804 : * transaction via txid (eg in our orphan handling).
805 : *
806 : * Memory used: 1.3 MB
807 : */
808 1 : CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
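// Sizing recap for the filter above: 1000 tx/s from a flooding attacker over the
// two-minute window mentioned in the comment gives 1000 * 120 = 120,000 entries,
// matching the {120'000, 0.000'001} parameters.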
809 : uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
810 :
811 : /**
812 : * Filter for transactions that have been recently confirmed.
813 : * We use this to avoid requesting transactions that have already been
814 : * confirmed.
815 : *
816 : * Blocks don't typically have more than 4000 transactions, so this should
817 : * be at least six blocks (~1 hr) worth of transactions that we can store,
818 : * inserting both a txid and wtxid for every observed transaction.
819 : * If the number of transactions appearing in a block goes up, or if we are
820 : * seeing getdata requests more than an hour after initial announcement, we
821 : * can increase this number.
822 : * The false positive rate of 1/1M should come out to less than 1
823 : * transaction per day that would be inadvertently ignored (which is the
824 : * same probability that we have in the reject filter).
825 : */
826 : Mutex m_recent_confirmed_transactions_mutex;
827 1 : CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};
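// Sizing recap for the filter above: 48,000 entries / (4,000 txs per block * 2 ids
// per tx) = 6 blocks, i.e. roughly one hour of blocks at the 10-minute spacing.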
828 :
829 : /**
830 : * For sending `inv`s to inbound peers, we use a single (exponentially
831 : * distributed) timer for all peers. If we used a separate timer for each
832 : * peer, a spy node could make multiple inbound connections to us to
833 : * accurately determine when we received the transaction (and potentially
834 : * determine the transaction's origin). */
835 : std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
836 : std::chrono::seconds average_interval);
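// Illustrative reading of the above: with INBOUND_INVENTORY_BROADCAST_INTERVAL = 5s,
// the next inbound `inv` time is drawn from an exponentially distributed delay
// averaging 5 seconds, and that single draw is shared by every inbound peer rather
// than sampled per peer, so a spy running several inbound connections can't
// triangulate when we first received a transaction.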
837 :
838 :
839 : // All of the following cache a recent block, and are protected by m_most_recent_block_mutex
840 : Mutex m_most_recent_block_mutex;
841 : std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
842 : std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
843 : uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
844 : std::unique_ptr<const std::map<uint256, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);
845 :
846 : // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates.
847 : /** Mutex guarding the other m_headers_presync_* variables. */
848 : Mutex m_headers_presync_mutex;
849 : /** A type to represent statistics about a peer's low-work headers sync.
850 : *
851 : * - The first field is the total verified amount of work in that synchronization.
852 : * - The second is:
853 : * - nullopt: the sync is in REDOWNLOAD phase (phase 2).
854 : * - {height, timestamp}: the sync has the specified tip height and block timestamp (phase 1).
855 : */
856 : using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
857 : /** Statistics for all peers in low-work headers sync. */
858 1 : std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
859 : /** The peer with the most-work entry in m_headers_presync_stats. */
860 1 : NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1};
861 : /** The m_headers_presync_stats improved, and needs signalling. */
862 1 : std::atomic_bool m_headers_presync_should_signal{false};
863 :
864 : /** Height of the highest block announced using BIP 152 high-bandwidth mode. */
865 1 : int m_highest_fast_announce GUARDED_BY(::cs_main){0};
866 :
867 : /** Have we requested this block from a peer */
868 : bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
869 :
870 : /** Have we requested this block from an outbound peer */
871 : bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
872 :
873 : /** Remove this block from our tracked requested blocks. Called if:
874 : * - the block has been received from a peer
875 : * - the request for the block has timed out
876 : * If "from_peer" is specified, then only remove the block if it is in
877 : * flight from that peer (to prevent one peer's network traffic from
878 : * affecting another's state).
879 : */
880 : void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
881 :
882 : /* Mark a block as in flight
883 : * Returns false, still setting pit, if the block was already in flight from the same peer.
884 : * pit will only be valid as long as the same cs_main lock is being held.
885 : */
886 : bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
887 :
888 : bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
889 :
890 : /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
891 : * at most count entries.
892 : */
893 : void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
894 :
895 : /* Multimap used to preserve insertion order */
896 : typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
897 : BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);
898 :
899 : /** When our tip was last updated. */
900 1 : std::atomic<std::chrono::seconds> m_last_tip_update{0s};
901 :
902 : /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */
903 : CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
904 : EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, NetEventsInterface::g_msgproc_mutex);
905 :
906 : void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
907 : EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)
908 : LOCKS_EXCLUDED(::cs_main);
909 :
910 : /** Process a new block. Perform any post-processing housekeeping */
911 : void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);
912 :
913 : /** Process compact block txns */
914 : void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
915 : EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex);
916 :
917 : /**
918 : * When a peer sends us a valid block, instruct it to announce blocks to us
919 : * using CMPCTBLOCK if possible by adding its nodeid to the end of
920 : * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
921 : * removing the first element if necessary.
922 : */
923 : void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
924 :
925 : /** Stack of nodes which we have set to announce using compact blocks */
926 : std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
927 :
928 : /** Number of peers from which we're downloading blocks. */
929 1 : int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
930 :
931 : /** Storage for orphan information */
932 : TxOrphanage m_orphanage;
933 :
934 : void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
935 :
936 : /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction.
937 : * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
938 : * these are kept in a ring buffer */
939 : std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
940 : /** Offset into vExtraTxnForCompact to insert the next tx */
941 1 : size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;
942 :
943 : /** Check whether the last unknown block a peer advertised is not yet known. */
944 : void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
945 : /** Update tracking information about which blocks a peer is assumed to have. */
946 : void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
947 : bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
948 :
949 : /**
950 : * To prevent fingerprinting attacks, only send blocks/headers outside of
951 : * the active chain if they are no more than a month older (both in time,
952 : * and in best equivalent proof of work) than the best header chain we know
953 : * about and we fully-validated them at some point.
954 : */
955 : bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
956 : bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
957 : void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
958 : EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
959 :
960 : /**
961 : * Validation logic for compact filters request handling.
962 : *
963 : * May disconnect from the peer in the case of a bad request.
964 : *
965 : * @param[in] node The node that we received the request from
966 : * @param[in] peer The peer that we received the request from
967 : * @param[in] filter_type The filter type the request is for. Must be basic filters.
968 : * @param[in] start_height The start height for the request
969 : * @param[in] stop_hash The stop_hash for the request
970 : * @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157
971 : * @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced.
972 : * @param[out] filter_index The filter index, if the request can be serviced.
973 : * @return True if the request can be serviced.
974 : */
975 : bool PrepareBlockFilterRequest(CNode& node, Peer& peer,
976 : BlockFilterType filter_type, uint32_t start_height,
977 : const uint256& stop_hash, uint32_t max_height_diff,
978 : const CBlockIndex*& stop_index,
979 : BlockFilterIndex*& filter_index);
980 :
981 : /**
982 : * Handle a cfilters request.
983 : *
984 : * May disconnect from the peer in the case of a bad request.
985 : *
986 : * @param[in] node The node that we received the request from
987 : * @param[in] peer The peer that we received the request from
988 : * @param[in] vRecv The raw message received
989 : */
990 : void ProcessGetCFilters(CNode& node, Peer& peer, CDataStream& vRecv);
991 :
992 : /**
993 : * Handle a cfheaders request.
994 : *
995 : * May disconnect from the peer in the case of a bad request.
996 : *
997 : * @param[in] node The node that we received the request from
998 : * @param[in] peer The peer that we received the request from
999 : * @param[in] vRecv The raw message received
1000 : */
1001 : void ProcessGetCFHeaders(CNode& node, Peer& peer, CDataStream& vRecv);
1002 :
1003 : /**
1004 : * Handle a getcfcheckpt request.
1005 : *
1006 : * May disconnect from the peer in the case of a bad request.
1007 : *
1008 : * @param[in] node The node that we received the request from
1009 : * @param[in] peer The peer that we received the request from
1010 : * @param[in] vRecv The raw message received
1011 : */
1012 : void ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& vRecv);
1013 :
1014 : /** Checks if address relay is permitted with peer. If needed, initializes
1015 : * the m_addr_known bloom filter and sets m_addr_relay_enabled to true.
1016 : *
1017 : * @return True if address relay is enabled with peer
1018 : * False if address relay is disallowed
1019 : */
1020 : bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1021 :
1022 : void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1023 : void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1024 : };
1025 :
1026 0 : const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1027 : {
1028 0 : std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1029 0 : if (it == m_node_states.end())
1030 0 : return nullptr;
1031 0 : return &it->second;
1032 0 : }
1033 :
1034 0 : CNodeState* PeerManagerImpl::State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1035 : {
1036 0 : return const_cast<CNodeState*>(std::as_const(*this).State(pnode));
1037 : }
1038 :
1039 : /**
1040 : * Whether the peer supports the address. For example, a peer that does not
1041 : * implement BIP155 cannot receive Tor v3 addresses because it requires
1042 : * ADDRv2 (BIP155) encoding.
1043 : */
1044 0 : static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
1045 : {
1046 0 : return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
1047 : }
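// Editor's note (illustrative, not part of the original source): a Tor v3
// onion address cannot be expressed in the pre-BIP155 "addr" (V1) format, so
// addr.IsAddrV1Compatible() is false for it and it is only relayed to peers
// that negotiated ADDRv2 via sendaddrv2 (peer.m_wants_addrv2). IPv4 and IPv6
// addresses remain relayable to all peers.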
1048 :
1049 0 : void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
1050 : {
1051 0 : assert(peer.m_addr_known);
1052 0 : peer.m_addr_known->insert(addr.GetKey());
1053 0 : }
1054 :
1055 0 : void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand)
1056 : {
1057 :     // The known-address check here only saves space by avoiding duplicates.
1058 :     // Before sending, we filter again for addresses that became known after
1059 :     // they were pushed.
1060 0 : assert(peer.m_addr_known);
1061 0 : if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
1062 0 : if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
1063 0 : peer.m_addrs_to_send[insecure_rand.randrange(peer.m_addrs_to_send.size())] = addr;
1064 0 : } else {
1065 0 : peer.m_addrs_to_send.push_back(addr);
1066 : }
1067 0 : }
1068 0 : }
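// Editor's note (illustrative, not part of the original source): once the
// per-peer queue holds MAX_ADDR_TO_SEND entries, PushAddress() overwrites a
// uniformly random slot instead of growing the vector, roughly:
//
//     if (queue.size() >= MAX_ADDR_TO_SEND) {
//         queue[rand.randrange(queue.size())] = addr; // evict a random entry
//     } else {
//         queue.push_back(addr);
//     }
//
// This bounds memory per peer while still giving late addresses a chance to
// be announced; it is not a uniform reservoir sample over all offered
// addresses.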
1069 :
1070 0 : static void AddKnownTx(Peer& peer, const uint256& hash)
1071 : {
1072 0 : auto tx_relay = peer.GetTxRelay();
1073 0 : if (!tx_relay) return;
1074 :
1075 0 : LOCK(tx_relay->m_tx_inventory_mutex);
1076 0 : tx_relay->m_tx_inventory_known_filter.insert(hash);
1077 0 : }
1078 :
1079 : /** Whether this peer can serve us blocks. */
1080 0 : static bool CanServeBlocks(const Peer& peer)
1081 : {
1082 0 : return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED);
1083 : }
1084 :
1085 : /** Whether this peer can only serve limited recent blocks (e.g. because
1086 : * it prunes old blocks) */
1087 0 : static bool IsLimitedPeer(const Peer& peer)
1088 : {
1089 0 : return (!(peer.m_their_services & NODE_NETWORK) &&
1090 0 : (peer.m_their_services & NODE_NETWORK_LIMITED));
1091 : }
1092 :
1093 : /** Whether this peer can serve us witness data */
1094 0 : static bool CanServeWitnesses(const Peer& peer)
1095 : {
1096 0 : return peer.m_their_services & NODE_WITNESS;
1097 : }
1098 :
1099 0 : std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1100 : std::chrono::seconds average_interval)
1101 : {
1102 0 : if (m_next_inv_to_inbounds.load() < now) {
1103 :         // If this function were called from multiple threads simultaneously,
1104 :         // it would be possible for both to update the next-send variable and return different results to their callers.
1105 :         // This is not possible in practice, as only the net processing thread invokes this function.
1106 0 : m_next_inv_to_inbounds = GetExponentialRand(now, average_interval);
1107 0 : }
1108 0 : return m_next_inv_to_inbounds;
1109 : }
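// Editor's note (illustrative, not part of the original source): the next
// inbound INV flush time is drawn from an exponential distribution around
// average_interval (via GetExponentialRand), so announcements follow a
// memoryless, Poisson-like schedule. A single shared timestamp is used for
// all inbound peers, which makes it harder for an attacker to learn a
// transaction's arrival time by connecting several inbound spy nodes and
// comparing when each of them first hears about it.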
1110 :
1111 0 : bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
1112 : {
1113 0 : return mapBlocksInFlight.count(hash);
1114 : }
1115 :
1116 0 : bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
1117 : {
1118 0 : for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1119 0 : auto [nodeid, block_it] = range.first->second;
1120 0 : CNodeState& nodestate = *Assert(State(nodeid));
1121 0 : if (!nodestate.m_is_inbound) return true;
1122 0 : }
1123 :
1124 0 : return false;
1125 0 : }
1126 :
1127 0 : void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
1128 : {
1129 0 : auto range = mapBlocksInFlight.equal_range(hash);
1130 0 : if (range.first == range.second) {
1131 : // Block was not requested from any peer
1132 0 : return;
1133 : }
1134 :
1135 :     // There should be no more than the maximum allowed number of in-flight requests for this block
1136 0 : Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);
1137 :
1138 0 : while (range.first != range.second) {
1139 0 : auto [node_id, list_it] = range.first->second;
1140 :
1141 0 : if (from_peer && *from_peer != node_id) {
1142 0 : range.first++;
1143 0 : continue;
1144 : }
1145 :
1146 0 : CNodeState& state = *Assert(State(node_id));
1147 :
1148 0 : if (state.vBlocksInFlight.begin() == list_it) {
1149 : // First block on the queue was received, update the start download time for the next one
1150 0 : state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
1151 0 : }
1152 0 : state.vBlocksInFlight.erase(list_it);
1153 :
1154 0 : if (state.vBlocksInFlight.empty()) {
1155 : // Last validated block on the queue for this peer was received.
1156 0 : m_peers_downloading_from--;
1157 0 : }
1158 0 : state.m_stalling_since = 0us;
1159 :
1160 0 : range.first = mapBlocksInFlight.erase(range.first);
1161 : }
1162 0 : }
1163 :
1164 0 : bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
1165 : {
1166 0 : const uint256& hash{block.GetBlockHash()};
1167 :
1168 0 : CNodeState *state = State(nodeid);
1169 0 : assert(state != nullptr);
1170 :
1171 0 : Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);
1172 :
1173 :     // Short-circuit most of the work if this block is already being requested from the same node
1174 0 : for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
1175 0 : if (range.first->second.first == nodeid) {
1176 0 : if (pit) {
1177 0 : *pit = &range.first->second.second;
1178 0 : }
1179 0 : return false;
1180 : }
1181 0 : }
1182 :
1183 : // Make sure it's not being fetched already from same peer.
1184 0 : RemoveBlockRequest(hash, nodeid);
1185 :
1186 0 : std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
1187 0 : {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
1188 0 : if (state->vBlocksInFlight.size() == 1) {
1189 : // We're starting a block download (batch) from this peer.
1190 0 : state->m_downloading_since = GetTime<std::chrono::microseconds>();
1191 0 : m_peers_downloading_from++;
1192 0 : }
1193 0 : auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
1194 0 : if (pit) {
1195 0 : *pit = &itInFlight->second.second;
1196 0 : }
1197 0 : return true;
1198 0 : }
1199 :
1200 0 : void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
1201 : {
1202 0 : AssertLockHeld(cs_main);
1203 :
1204 : // When in -blocksonly mode, never request high-bandwidth mode from peers. Our
1205 : // mempool will not contain the transactions necessary to reconstruct the
1206 : // compact block.
1207 0 : if (m_opts.ignore_incoming_txs) return;
1208 :
1209 0 : CNodeState* nodestate = State(nodeid);
1210 0 : if (!nodestate || !nodestate->m_provides_cmpctblocks) {
1211 : // Don't request compact blocks if the peer has not signalled support
1212 0 : return;
1213 : }
1214 :
1215 0 : int num_outbound_hb_peers = 0;
1216 0 : for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
1217 0 : if (*it == nodeid) {
1218 0 : lNodesAnnouncingHeaderAndIDs.erase(it);
1219 0 : lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
1220 0 : return;
1221 : }
1222 0 : CNodeState *state = State(*it);
1223 0 : if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers;
1224 0 : }
1225 0 : if (nodestate->m_is_inbound) {
1226 : // If we're adding an inbound HB peer, make sure we're not removing
1227 : // our last outbound HB peer in the process.
1228 0 : if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) {
1229 0 : CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front());
1230 0 : if (remove_node != nullptr && !remove_node->m_is_inbound) {
1231 : // Put the HB outbound peer in the second slot, so that it
1232 : // doesn't get removed.
1233 0 : std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
1234 0 : }
1235 0 : }
1236 0 : }
1237 0 : m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1238 0 : AssertLockHeld(::cs_main);
1239 0 : if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
1240 : // As per BIP152, we only get 3 of our peers to announce
1241 : // blocks using compact encodings.
1242 0 : m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){
1243 0 : m_connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION));
1244 :                     // save BIP152 bandwidth state: we select this peer to be low-bandwidth
1245 0 : pnodeStop->m_bip152_highbandwidth_to = false;
1246 0 : return true;
1247 0 : });
1248 0 : lNodesAnnouncingHeaderAndIDs.pop_front();
1249 0 : }
1250 0 : m_connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION));
1251 :         // save BIP152 bandwidth state: we select this peer to be high-bandwidth
1252 0 : pfrom->m_bip152_highbandwidth_to = true;
1253 0 : lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
1254 0 : return true;
1255 0 : });
1256 0 : }
1257 :
1258 0 : bool PeerManagerImpl::TipMayBeStale()
1259 : {
1260 0 : AssertLockHeld(cs_main);
1261 0 : const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
1262 0 : if (m_last_tip_update.load() == 0s) {
1263 0 : m_last_tip_update = GetTime<std::chrono::seconds>();
1264 0 : }
1265 0 : return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
1266 : }
1267 :
1268 0 : bool PeerManagerImpl::CanDirectFetch()
1269 : {
1270 0 : return m_chainman.ActiveChain().Tip()->Time() > GetAdjustedTime() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1271 : }
1272 :
1273 0 : static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
1274 : {
1275 0 : if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
1276 0 : return true;
1277 0 : if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
1278 0 : return true;
1279 0 : return false;
1280 0 : }
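// Editor's note (illustrative, not part of the original source): the
// GetAncestor() comparison above treats a header as "known" if it lies on the
// chain leading to either the peer's best known block or the best header we
// already sent them. For example, if the peer's best known block is at height
// 100 and pindex is the height-90 block on that same chain, then
// pindexBestKnownBlock->GetAncestor(90) == pindex and we do not need to
// announce it again.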
1281 :
1282 0 : void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
1283 0 : CNodeState *state = State(nodeid);
1284 0 : assert(state != nullptr);
1285 :
1286 0 : if (!state->hashLastUnknownBlock.IsNull()) {
1287 0 : const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
1288 0 : if (pindex && pindex->nChainWork > 0) {
1289 0 : if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1290 0 : state->pindexBestKnownBlock = pindex;
1291 0 : }
1292 0 : state->hashLastUnknownBlock.SetNull();
1293 0 : }
1294 0 : }
1295 0 : }
1296 :
1297 0 : void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
1298 0 : CNodeState *state = State(nodeid);
1299 0 : assert(state != nullptr);
1300 :
1301 0 : ProcessBlockAvailability(nodeid);
1302 :
1303 0 : const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
1304 0 : if (pindex && pindex->nChainWork > 0) {
1305 : // An actually better block was announced.
1306 0 : if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
1307 0 : state->pindexBestKnownBlock = pindex;
1308 0 : }
1309 0 : } else {
1310 : // An unknown block was announced; just assume that the latest one is the best one.
1311 0 : state->hashLastUnknownBlock = hash;
1312 : }
1313 0 : }
1314 :
1315 0 : void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
1316 : {
1317 0 : if (count == 0)
1318 0 : return;
1319 :
1320 0 : vBlocks.reserve(vBlocks.size() + count);
1321 0 : CNodeState *state = State(peer.m_id);
1322 0 : assert(state != nullptr);
1323 :
1324 : // Make sure pindexBestKnownBlock is up to date, we'll need it.
1325 0 : ProcessBlockAvailability(peer.m_id);
1326 :
1327 0 : if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
1328 : // This peer has nothing interesting.
1329 0 : return;
1330 : }
1331 :
1332 0 : if (state->pindexLastCommonBlock == nullptr) {
1333 : // Bootstrap quickly by guessing a parent of our best tip is the forking point.
1334 : // Guessing wrong in either direction is not a problem.
1335 0 : state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())];
1336 0 : }
1337 :
1338 : // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
1339 : // of its current tip anymore. Go back enough to fix that.
1340 0 : state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
1341 0 : if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
1342 0 : return;
1343 :
1344 0 : std::vector<const CBlockIndex*> vToFetch;
1345 0 : const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
1346 : // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
1347 : // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
1348 : // download that next block if the window were 1 larger.
1349 0 : int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
1350 0 : int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
1351 0 : NodeId waitingfor = -1;
1352 0 : while (pindexWalk->nHeight < nMaxHeight) {
1353 : // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
1354 : // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
1355 : // as iterating over ~100 CBlockIndex* entries anyway.
1356 0 : int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
1357 0 : vToFetch.resize(nToFetch);
1358 0 : pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
1359 0 : vToFetch[nToFetch - 1] = pindexWalk;
1360 0 : for (unsigned int i = nToFetch - 1; i > 0; i--) {
1361 0 : vToFetch[i - 1] = vToFetch[i]->pprev;
1362 0 : }
1363 :
1364 : // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
1365 : // are not yet downloaded and not in flight to vBlocks. In the meantime, update
1366 : // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
1367 :         // already part of our chain (and therefore we don't need its data even if it was pruned).
1368 0 : for (const CBlockIndex* pindex : vToFetch) {
1369 0 : if (!pindex->IsValid(BLOCK_VALID_TREE)) {
1370 : // We consider the chain that this peer is on invalid.
1371 0 : return;
1372 : }
1373 0 : if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) {
1374 : // We wouldn't download this block or its descendants from this peer.
1375 0 : return;
1376 : }
1377 0 : if (pindex->nStatus & BLOCK_HAVE_DATA || m_chainman.ActiveChain().Contains(pindex)) {
1378 0 : if (pindex->HaveTxsDownloaded())
1379 0 : state->pindexLastCommonBlock = pindex;
1380 0 : } else if (!IsBlockRequested(pindex->GetBlockHash())) {
1381 : // The block is not already downloaded, and not yet in flight.
1382 0 : if (pindex->nHeight > nWindowEnd) {
1383 : // We reached the end of the window.
1384 0 : if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
1385 :                         // We aren't able to fetch anything, but we would be if the download window were one larger.
1386 0 : nodeStaller = waitingfor;
1387 0 : }
1388 0 : return;
1389 : }
1390 0 : vBlocks.push_back(pindex);
1391 0 : if (vBlocks.size() == count) {
1392 0 : return;
1393 : }
1394 0 : } else if (waitingfor == -1) {
1395 : // This is the first already-in-flight block.
1396 0 : waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
1397 0 : }
1398 : }
1399 : }
1400 0 : }
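// Editor's note (illustrative, not part of the original source): a worked
// example of the download window logic above. Suppose the last common block
// with the peer is at height 10,000, the peer's best known block is much
// higher, and BLOCK_DOWNLOAD_WINDOW is 1024 (its value in the current source;
// treated as an assumption here). Then nWindowEnd = 11,024 and nMaxHeight
// allows walking one block further, to 11,025. If every block up to 11,024 is
// already downloaded or in flight and the only fetchable candidate sits at
// 11,025, nothing is returned; instead nodeStaller is set to the peer whose
// in-flight block the window is stuck behind, which the caller can use for
// stalling detection.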
1401 :
1402 : } // namespace
1403 :
1404 0 : void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
1405 : {
1406 0 : uint64_t my_services{peer.m_our_services};
1407 0 : const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
1408 0 : uint64_t nonce = pnode.GetLocalNonce();
1409 0 : const int nNodeStartingHeight{m_best_height};
1410 0 : NodeId nodeid = pnode.GetId();
1411 0 : CAddress addr = pnode.addr;
1412 :
1413 0 : CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
1414 0 : uint64_t your_services{addr.nServices};
1415 :
1416 0 : const bool tx_relay{!RejectIncomingTxs(pnode)};
1417 0 : m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
1418 0 : your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
1419 0 : my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
1420 : nonce, strSubVersion, nNodeStartingHeight, tx_relay));
1421 :
1422 0 : if (fLogIPs) {
1423 0 : LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid);
1424 0 : } else {
1425 0 : LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
1426 : }
1427 0 : }
1428 :
1429 0 : void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
1430 : {
1431 0 : AssertLockHeld(::cs_main); // For m_txrequest
1432 0 : NodeId nodeid = node.GetId();
1433 0 : if (!node.HasPermission(NetPermissionFlags::Relay) && m_txrequest.Count(nodeid) >= MAX_PEER_TX_ANNOUNCEMENTS) {
1434 : // Too many queued announcements from this peer
1435 0 : return;
1436 : }
1437 0 : const CNodeState* state = State(nodeid);
1438 :
1439 : // Decide the TxRequestTracker parameters for this announcement:
1440 : // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission)
1441 : // - "reqtime": current time plus delays for:
1442 : // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections
1443 : // - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
1444 : // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
1445 : // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay).
1446 0 : auto delay{0us};
1447 0 : const bool preferred = state->fPreferredDownload;
1448 0 : if (!preferred) delay += NONPREF_PEER_TX_DELAY;
1449 0 : if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
1450 0 : const bool overloaded = !node.HasPermission(NetPermissionFlags::Relay) &&
1451 0 : m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
1452 0 : if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
1453 0 : m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay);
1454 0 : }
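// Editor's note (illustrative, not part of the original source): a worked
// example of the request delay above. An inbound peer without special
// permissions (so not "preferred" and subject to the in-flight cap) that
// announces a plain txid while wtxid-relay peers are available, and that
// already has MAX_PEER_TX_REQUEST_IN_FLIGHT requests outstanding, accrues
// NONPREF_PEER_TX_DELAY + TXID_RELAY_DELAY + OVERLOADED_PEER_TX_DELAY. With
// each delay at 2 seconds (their values in the current source; treated as
// assumptions here), the request is scheduled 6 seconds out, giving preferred
// and wtxid-relay peers a head start at providing the transaction.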
1455 :
1456 0 : void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
1457 : {
1458 0 : LOCK(cs_main);
1459 0 : CNodeState *state = State(node);
1460 0 : if (state) state->m_last_block_announcement = time_in_seconds;
1461 0 : }
1462 :
1463 0 : void PeerManagerImpl::InitializeNode(CNode& node, ServiceFlags our_services)
1464 : {
1465 0 : NodeId nodeid = node.GetId();
1466 : {
1467 0 : LOCK(cs_main);
1468 0 : m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn()));
1469 0 : assert(m_txrequest.Count(nodeid) == 0);
1470 0 : }
1471 0 : PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
1472 : {
1473 0 : LOCK(m_peer_mutex);
1474 0 : m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
1475 0 : }
1476 0 : if (!node.IsInboundConn()) {
1477 0 : PushNodeVersion(node, *peer);
1478 0 : }
1479 0 : }
1480 :
1481 0 : void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
1482 : {
1483 0 : std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
1484 :
1485 0 : for (const auto& txid : unbroadcast_txids) {
1486 0 : CTransactionRef tx = m_mempool.get(txid);
1487 :
1488 0 : if (tx != nullptr) {
1489 0 : RelayTransaction(txid, tx->GetWitnessHash());
1490 0 : } else {
1491 0 : m_mempool.RemoveUnbroadcastTx(txid, true);
1492 : }
1493 0 : }
1494 :
1495 : // Schedule next run for 10-15 minutes in the future.
1496 : // We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
1497 0 : const std::chrono::milliseconds delta = 10min + GetRandMillis(5min);
1498 0 : scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1499 0 : }
1500 :
1501 0 : void PeerManagerImpl::FinalizeNode(const CNode& node)
1502 : {
1503 0 : NodeId nodeid = node.GetId();
1504 0 : int misbehavior{0};
1505 : {
1506 0 : LOCK(cs_main);
1507 : {
1508 :             // We remove the PeerRef from m_peer_map here, but we don't always
1509 : // destruct the Peer. Sometimes another thread is still holding a
1510 : // PeerRef, so the refcount is >= 1. Be careful not to do any
1511 : // processing here that assumes Peer won't be changed before it's
1512 : // destructed.
1513 0 : PeerRef peer = RemovePeer(nodeid);
1514 0 : assert(peer != nullptr);
1515 0 : misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
1516 0 : m_wtxid_relay_peers -= peer->m_wtxid_relay;
1517 0 : assert(m_wtxid_relay_peers >= 0);
1518 0 : }
1519 0 : CNodeState *state = State(nodeid);
1520 0 : assert(state != nullptr);
1521 :
1522 0 : if (state->fSyncStarted)
1523 0 : nSyncStarted--;
1524 :
1525 0 : for (const QueuedBlock& entry : state->vBlocksInFlight) {
1526 0 : auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
1527 0 : while (range.first != range.second) {
1528 0 : auto [node_id, list_it] = range.first->second;
1529 0 : if (node_id != nodeid) {
1530 0 : range.first++;
1531 0 : } else {
1532 0 : range.first = mapBlocksInFlight.erase(range.first);
1533 : }
1534 : }
1535 : }
1536 0 : m_orphanage.EraseForPeer(nodeid);
1537 0 : m_txrequest.DisconnectedPeer(nodeid);
1538 0 : if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
1539 0 : m_num_preferred_download_peers -= state->fPreferredDownload;
1540 0 : m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
1541 0 : assert(m_peers_downloading_from >= 0);
1542 0 : m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
1543 0 : assert(m_outbound_peers_with_protect_from_disconnect >= 0);
1544 :
1545 0 : m_node_states.erase(nodeid);
1546 :
1547 0 : if (m_node_states.empty()) {
1548 : // Do a consistency check after the last peer is removed.
1549 0 : assert(mapBlocksInFlight.empty());
1550 0 : assert(m_num_preferred_download_peers == 0);
1551 0 : assert(m_peers_downloading_from == 0);
1552 0 : assert(m_outbound_peers_with_protect_from_disconnect == 0);
1553 0 : assert(m_wtxid_relay_peers == 0);
1554 0 : assert(m_txrequest.Size() == 0);
1555 0 : assert(m_orphanage.Size() == 0);
1556 0 : }
1557 0 : } // cs_main
1558 0 : if (node.fSuccessfullyConnected && misbehavior == 0 &&
1559 0 : !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
1560 : // Only change visible addrman state for full outbound peers. We don't
1561 : // call Connected() for feeler connections since they don't have
1562 : // fSuccessfullyConnected set.
1563 0 : m_addrman.Connected(node.addr);
1564 0 : }
1565 : {
1566 0 : LOCK(m_headers_presync_mutex);
1567 0 : m_headers_presync_stats.erase(nodeid);
1568 0 : }
1569 0 : LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
1570 0 : }
1571 :
1572 0 : PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const
1573 : {
1574 0 : LOCK(m_peer_mutex);
1575 0 : auto it = m_peer_map.find(id);
1576 0 : return it != m_peer_map.end() ? it->second : nullptr;
1577 0 : }
1578 :
1579 0 : PeerRef PeerManagerImpl::RemovePeer(NodeId id)
1580 : {
1581 0 : PeerRef ret;
1582 0 : LOCK(m_peer_mutex);
1583 0 : auto it = m_peer_map.find(id);
1584 0 : if (it != m_peer_map.end()) {
1585 0 : ret = std::move(it->second);
1586 0 : m_peer_map.erase(it);
1587 0 : }
1588 0 : return ret;
1589 0 : }
1590 :
1591 0 : bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const
1592 : {
1593 : {
1594 0 : LOCK(cs_main);
1595 0 : const CNodeState* state = State(nodeid);
1596 0 : if (state == nullptr)
1597 0 : return false;
1598 0 : stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
1599 0 : stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
1600 0 : for (const QueuedBlock& queue : state->vBlocksInFlight) {
1601 0 : if (queue.pindex)
1602 0 : stats.vHeightInFlight.push_back(queue.pindex->nHeight);
1603 : }
1604 0 : }
1605 :
1606 0 : PeerRef peer = GetPeerRef(nodeid);
1607 0 : if (peer == nullptr) return false;
1608 0 : stats.their_services = peer->m_their_services;
1609 0 : stats.m_starting_height = peer->m_starting_height;
1610 : // It is common for nodes with good ping times to suddenly become lagged,
1611 : // due to a new block arriving or other large transfer.
1612 : // Merely reporting pingtime might fool the caller into thinking the node was still responsive,
1613 : // since pingtime does not update until the ping is complete, which might take a while.
1614 : // So, if a ping is taking an unusually long time in flight,
1615 : // the caller can immediately detect that this is happening.
1616 0 : auto ping_wait{0us};
1617 0 : if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
1618 0 : ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
1619 0 : }
1620 :
1621 0 : if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
1622 0 : stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs);
1623 0 : stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
1624 0 : } else {
1625 0 : stats.m_relay_txs = false;
1626 0 : stats.m_fee_filter_received = 0;
1627 : }
1628 :
1629 0 : stats.m_ping_wait = ping_wait;
1630 0 : stats.m_addr_processed = peer->m_addr_processed.load();
1631 0 : stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
1632 0 : stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
1633 : {
1634 0 : LOCK(peer->m_headers_sync_mutex);
1635 0 : if (peer->m_headers_sync) {
1636 0 : stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
1637 0 : }
1638 0 : }
1639 :
1640 0 : return true;
1641 0 : }
1642 :
1643 0 : void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
1644 : {
1645 0 : if (m_opts.max_extra_txs <= 0)
1646 0 : return;
1647 0 : if (!vExtraTxnForCompact.size())
1648 0 : vExtraTxnForCompact.resize(m_opts.max_extra_txs);
1649 0 : vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
1650 0 : vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
1651 0 : }
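// Editor's note (illustrative, not part of the original source):
// vExtraTxnForCompact is used as a fixed-size ring buffer. With
// m_opts.max_extra_txs set to, say, 100 (an assumed value; it is
// configurable), the 101st transaction added overwrites slot 0, the 102nd
// overwrites slot 1, and so on, so only the most recent max_extra_txs
// transactions are kept as extra candidates for compact block reconstruction.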
1652 :
1653 0 : void PeerManagerImpl::Misbehaving(Peer& peer, int howmuch, const std::string& message)
1654 : {
1655 0 : assert(howmuch > 0);
1656 :
1657 0 : LOCK(peer.m_misbehavior_mutex);
1658 0 : const int score_before{peer.m_misbehavior_score};
1659 0 : peer.m_misbehavior_score += howmuch;
1660 0 : const int score_now{peer.m_misbehavior_score};
1661 :
1662 0 : const std::string message_prefixed = message.empty() ? "" : (": " + message);
1663 0 : std::string warning;
1664 :
1665 0 : if (score_now >= DISCOURAGEMENT_THRESHOLD && score_before < DISCOURAGEMENT_THRESHOLD) {
1666 0 : warning = " DISCOURAGE THRESHOLD EXCEEDED";
1667 0 : peer.m_should_discourage = true;
1668 0 : }
1669 :
1670 0 : LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n",
1671 : peer.m_id, score_before, score_now, warning, message_prefixed);
1672 0 : }
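// Editor's note (illustrative, not part of the original source): misbehavior
// is cumulative per peer. The warning above fires exactly once, on the call
// whose increment crosses DISCOURAGEMENT_THRESHOLD (100 in the current
// source; treated as an assumption here), so a single 100-point offence such
// as a consensus-invalid block marks the peer for discouragement immediately,
// while smaller offences have to accumulate.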
1673 :
1674 0 : bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
1675 : bool via_compact_block, const std::string& message)
1676 : {
1677 0 : PeerRef peer{GetPeerRef(nodeid)};
1678 0 : switch (state.GetResult()) {
1679 : case BlockValidationResult::BLOCK_RESULT_UNSET:
1680 0 : break;
1681 : case BlockValidationResult::BLOCK_HEADER_LOW_WORK:
1682 : // We didn't try to process the block because the header chain may have
1683 : // too little work.
1684 0 : break;
1685 : // The node is providing invalid data:
1686 : case BlockValidationResult::BLOCK_CONSENSUS:
1687 : case BlockValidationResult::BLOCK_MUTATED:
1688 0 : if (!via_compact_block) {
1689 0 : if (peer) Misbehaving(*peer, 100, message);
1690 0 : return true;
1691 : }
1692 0 : break;
1693 : case BlockValidationResult::BLOCK_CACHED_INVALID:
1694 : {
1695 0 : LOCK(cs_main);
1696 0 : CNodeState *node_state = State(nodeid);
1697 0 : if (node_state == nullptr) {
1698 0 : break;
1699 : }
1700 :
1701 : // Discourage outbound (but not inbound) peers if on an invalid chain.
1702 : // Exempt HB compact block peers. Manual connections are always protected from discouragement.
1703 0 : if (!via_compact_block && !node_state->m_is_inbound) {
1704 0 : if (peer) Misbehaving(*peer, 100, message);
1705 0 : return true;
1706 : }
1707 0 : break;
1708 0 : }
1709 : case BlockValidationResult::BLOCK_INVALID_HEADER:
1710 : case BlockValidationResult::BLOCK_CHECKPOINT:
1711 : case BlockValidationResult::BLOCK_INVALID_PREV:
1712 0 : if (peer) Misbehaving(*peer, 100, message);
1713 0 : return true;
1714 : // Conflicting (but not necessarily invalid) data or different policy:
1715 : case BlockValidationResult::BLOCK_MISSING_PREV:
1716 : // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
1717 0 : if (peer) Misbehaving(*peer, 10, message);
1718 0 : return true;
1719 : case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
1720 : case BlockValidationResult::BLOCK_TIME_FUTURE:
1721 0 : break;
1722 : }
1723 0 : if (message != "") {
1724 0 : LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1725 0 : }
1726 0 : return false;
1727 0 : }
1728 :
1729 0 : bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
1730 : {
1731 0 : PeerRef peer{GetPeerRef(nodeid)};
1732 0 : switch (state.GetResult()) {
1733 : case TxValidationResult::TX_RESULT_UNSET:
1734 0 : break;
1735 : // The node is providing invalid data:
1736 : case TxValidationResult::TX_CONSENSUS:
1737 0 : if (peer) Misbehaving(*peer, 100, "");
1738 0 : return true;
1739 : // Conflicting (but not necessarily invalid) data or different policy:
1740 : case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
1741 : case TxValidationResult::TX_INPUTS_NOT_STANDARD:
1742 : case TxValidationResult::TX_NOT_STANDARD:
1743 : case TxValidationResult::TX_MISSING_INPUTS:
1744 : case TxValidationResult::TX_PREMATURE_SPEND:
1745 : case TxValidationResult::TX_WITNESS_MUTATED:
1746 : case TxValidationResult::TX_WITNESS_STRIPPED:
1747 : case TxValidationResult::TX_CONFLICT:
1748 : case TxValidationResult::TX_MEMPOOL_POLICY:
1749 : case TxValidationResult::TX_NO_MEMPOOL:
1750 0 : break;
1751 : }
1752 0 : return false;
1753 0 : }
1754 :
1755 0 : bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
1756 : {
1757 0 : AssertLockHeld(cs_main);
1758 0 : if (m_chainman.ActiveChain().Contains(pindex)) return true;
1759 0 : return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) &&
1760 0 : (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
1761 0 : (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
1762 0 : }
1763 :
1764 0 : std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
1765 : {
1766 0 : if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ...";
1767 :
1768 : // Ensure this peer exists and hasn't been disconnected
1769 0 : PeerRef peer = GetPeerRef(peer_id);
1770 0 : if (peer == nullptr) return "Peer does not exist";
1771 :
1772 : // Ignore pre-segwit peers
1773 0 : if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer";
1774 :
1775 0 : LOCK(cs_main);
1776 :
1777 : // Forget about all prior requests
1778 0 : RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
1779 :
1780 : // Mark block as in-flight
1781 0 : if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
1782 :
1783 : // Construct message to request the block
1784 0 : const uint256& hash{block_index.GetBlockHash()};
1785 0 : std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};
1786 :
1787 : // Send block request message to the peer
1788 0 : bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
1789 0 : const CNetMsgMaker msgMaker(node->GetCommonVersion());
1790 0 : this->m_connman.PushMessage(node, msgMaker.Make(NetMsgType::GETDATA, invs));
1791 0 : return true;
1792 0 : });
1793 :
1794 0 : if (!success) return "Peer not fully connected";
1795 :
1796 0 : LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
1797 : hash.ToString(), peer_id);
1798 0 : return std::nullopt;
1799 0 : }
1800 :
1801 1 : std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman,
1802 : BanMan* banman, ChainstateManager& chainman,
1803 : CTxMemPool& pool, Options opts)
1804 : {
1805 1 : return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, opts);
1806 : }
1807 :
1808 2 : PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
1809 : BanMan* banman, ChainstateManager& chainman,
1810 : CTxMemPool& pool, Options opts)
1811 1 : : m_chainparams(chainman.GetParams()),
1812 1 : m_connman(connman),
1813 1 : m_addrman(addrman),
1814 1 : m_banman(banman),
1815 1 : m_chainman(chainman),
1816 1 : m_mempool(pool),
1817 1 : m_opts{opts}
1818 1 : {
1819 : // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
1820 : // This argument can go away after Erlay support is complete.
1821 1 : if (opts.reconcile_txs) {
1822 0 : m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION);
1823 0 : }
1824 1 : }
1825 :
1826 0 : void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
1827 : {
1828 : // Stale tip checking and peer eviction are on two different timers, but we
1829 : // don't want them to get out of sync due to drift in the scheduler, so we
1830 : // combine them in one function and schedule at the quicker (peer-eviction)
1831 : // timer.
1832 : static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
1833 0 : scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1834 :
1835 : // schedule next run for 10-15 minutes in the future
1836 0 : const std::chrono::milliseconds delta = 10min + GetRandMillis(5min);
1837 0 : scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
1838 0 : }
1839 :
1840 : /**
1841 : * Evict orphan txn pool entries based on a newly connected
1842 : * block, remember the recently confirmed transactions, and delete tracked
1843 : * announcements for them. Also save the time of the last tip update and
1844 : * possibly reduce dynamic block stalling timeout.
1845 : */
1846 0 : void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
1847 : {
1848 0 : m_orphanage.EraseForBlock(*pblock);
1849 0 : m_last_tip_update = GetTime<std::chrono::seconds>();
1850 :
1851 : {
1852 0 : LOCK(m_recent_confirmed_transactions_mutex);
1853 0 : for (const auto& ptx : pblock->vtx) {
1854 0 : m_recent_confirmed_transactions.insert(ptx->GetHash());
1855 0 : if (ptx->GetHash() != ptx->GetWitnessHash()) {
1856 0 : m_recent_confirmed_transactions.insert(ptx->GetWitnessHash());
1857 0 : }
1858 : }
1859 0 : }
1860 : {
1861 0 : LOCK(cs_main);
1862 0 : for (const auto& ptx : pblock->vtx) {
1863 0 : m_txrequest.ForgetTxHash(ptx->GetHash());
1864 0 : m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
1865 : }
1866 0 : }
1867 :
1868 : // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
1869 0 : auto stalling_timeout = m_block_stalling_timeout.load();
1870 0 : Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
1871 0 : if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
1872 0 : const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
1873 0 : if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
1874 0 : LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
1875 0 : }
1876 0 : }
1877 0 : }
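// Editor's note (illustrative, not part of the original source): a worked
// example of the stalling-timeout decay above. If the timeout had been
// doubled to 8s by earlier stalling, each connected block shrinks it by a
// factor of 0.85, truncated to whole seconds by the duration_cast:
// 8s -> 6s -> 5s -> 4s -> 3s -> 2s, at which point it is clamped at
// BLOCK_STALLING_TIMEOUT_DEFAULT (2s in the current source; treated as an
// assumption here) and stops decreasing.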
1878 :
1879 0 : void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
1880 : {
1881 : // To avoid relay problems with transactions that were previously
1882 : // confirmed, clear our filter of recently confirmed transactions whenever
1883 : // there's a reorg.
1884 : // This means that in a 1-block reorg (where 1 block is disconnected and
1885 : // then another block reconnected), our filter will drop to having only one
1886 : // block's worth of transactions in it, but that should be fine, since
1887 : // presumably the most common case of relaying a confirmed transaction
1888 : // should be just after a new block containing it is found.
1889 0 : LOCK(m_recent_confirmed_transactions_mutex);
1890 0 : m_recent_confirmed_transactions.reset();
1891 0 : }
1892 :
1893 : /**
1894 : * Maintain state about the best-seen block and fast-announce a compact block
1895 : * to compatible peers.
1896 : */
1897 0 : void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock)
1898 : {
1899 0 : auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
1900 0 : const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
1901 :
1902 0 : LOCK(cs_main);
1903 :
1904 0 : if (pindex->nHeight <= m_highest_fast_announce)
1905 0 : return;
1906 0 : m_highest_fast_announce = pindex->nHeight;
1907 :
1908 0 : if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return;
1909 :
1910 0 : uint256 hashBlock(pblock->GetHash());
1911 0 : const std::shared_future<CSerializedNetMsg> lazy_ser{
1912 0 : std::async(std::launch::deferred, [&] { return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })};
1913 :
1914 : {
1915 0 : auto most_recent_block_txs = std::make_unique<std::map<uint256, CTransactionRef>>();
1916 0 : for (const auto& tx : pblock->vtx) {
1917 0 : most_recent_block_txs->emplace(tx->GetHash(), tx);
1918 0 : most_recent_block_txs->emplace(tx->GetWitnessHash(), tx);
1919 : }
1920 :
1921 0 : LOCK(m_most_recent_block_mutex);
1922 0 : m_most_recent_block_hash = hashBlock;
1923 0 : m_most_recent_block = pblock;
1924 0 : m_most_recent_compact_block = pcmpctblock;
1925 0 : m_most_recent_block_txs = std::move(most_recent_block_txs);
1926 0 : }
1927 :
1928 0 : m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
1929 0 : AssertLockHeld(::cs_main);
1930 :
1931 0 : if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
1932 0 : return;
1933 0 : ProcessBlockAvailability(pnode->GetId());
1934 0 : CNodeState &state = *State(pnode->GetId());
1935 : // If the peer has, or we announced to them the previous block already,
1936 : // but we don't think they have this one, go ahead and announce it
1937 0 : if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
1938 :
1939 0 : LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock",
1940 : hashBlock.ToString(), pnode->GetId());
1941 :
1942 0 : const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()};
1943 0 : m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
1944 0 : state.pindexBestHeaderSent = pindex;
1945 0 : }
1946 0 : });
1947 0 : }
1948 :
1949 : /**
1950 : * Update our best height and announce any block hashes which weren't previously
1951 : * in m_chainman.ActiveChain() to our peers.
1952 : */
1953 0 : void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
1954 : {
1955 0 : SetBestHeight(pindexNew->nHeight);
1956 0 : SetServiceFlagsIBDCache(!fInitialDownload);
1957 :
1958 : // Don't relay inventory during initial block download.
1959 0 : if (fInitialDownload) return;
1960 :
1961 : // Find the hashes of all blocks that weren't previously in the best chain.
1962 0 : std::vector<uint256> vHashes;
1963 0 : const CBlockIndex *pindexToAnnounce = pindexNew;
1964 0 : while (pindexToAnnounce != pindexFork) {
1965 0 : vHashes.push_back(pindexToAnnounce->GetBlockHash());
1966 0 : pindexToAnnounce = pindexToAnnounce->pprev;
1967 0 : if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
1968 : // Limit announcements in case of a huge reorganization.
1969 : // Rely on the peer's synchronization mechanism in that case.
1970 0 : break;
1971 : }
1972 : }
1973 :
1974 : {
1975 0 : LOCK(m_peer_mutex);
1976 0 : for (auto& it : m_peer_map) {
1977 0 : Peer& peer = *it.second;
1978 0 : LOCK(peer.m_block_inv_mutex);
1979 0 : for (const uint256& hash : reverse_iterate(vHashes)) {
1980 0 : peer.m_blocks_for_headers_relay.push_back(hash);
1981 : }
1982 0 : }
1983 0 : }
1984 :
1985 0 : m_connman.WakeMessageHandler();
1986 0 : }
1987 :
1988 : /**
1989 : * Handle invalid block rejection and consequent peer discouragement, maintain which
1990 : * peers announce compact blocks.
1991 : */
1992 0 : void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state)
1993 : {
1994 0 : LOCK(cs_main);
1995 :
1996 0 : const uint256 hash(block.GetHash());
1997 0 : std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
1998 :
1999 : // If the block failed validation, we know where it came from and we're still connected
2000 : // to that peer, maybe punish.
2001 0 : if (state.IsInvalid() &&
2002 0 : it != mapBlockSource.end() &&
2003 0 : State(it->second.first)) {
2004 0 : MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second);
2005 0 : }
2006 : // Check that:
2007 : // 1. The block is valid
2008 : // 2. We're not in initial block download
2009 : // 3. This is currently the best block we're aware of. We haven't updated
2010 : // the tip yet so we have no way to check this directly here. Instead we
2011 : // just check that there are currently no other blocks in flight.
2012 0 : else if (state.IsValid() &&
2013 0 : !m_chainman.IsInitialBlockDownload() &&
2014 0 : mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
2015 0 : if (it != mapBlockSource.end()) {
2016 0 : MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
2017 0 : }
2018 0 : }
2019 0 : if (it != mapBlockSource.end())
2020 0 : mapBlockSource.erase(it);
2021 0 : }
2022 :
2023 : //////////////////////////////////////////////////////////////////////////////
2024 : //
2025 : // Messages
2026 : //
2027 :
2028 :
2029 0 : bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
2030 : {
2031 0 : if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
2032 :         // If the chain tip has changed, previously rejected transactions
2033 :         // might now be valid, e.g. due to an nLockTime'd tx becoming valid,
2034 : // or a double-spend. Reset the rejects filter and give those
2035 : // txs a second chance.
2036 0 : hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
2037 0 : m_recent_rejects.reset();
2038 0 : }
2039 :
2040 0 : const uint256& hash = gtxid.GetHash();
2041 :
2042 0 : if (m_orphanage.HaveTx(gtxid)) return true;
2043 :
2044 : {
2045 0 : LOCK(m_recent_confirmed_transactions_mutex);
2046 0 : if (m_recent_confirmed_transactions.contains(hash)) return true;
2047 0 : }
2048 :
2049 0 : return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid);
2050 0 : }
2051 :
2052 0 : bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
2053 : {
2054 0 : return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
2055 : }
2056 :
2057 0 : void PeerManagerImpl::SendPings()
2058 : {
2059 0 : LOCK(m_peer_mutex);
2060 0 : for(auto& it : m_peer_map) it.second->m_ping_queued = true;
2061 0 : }
2062 :
2063 0 : void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
2064 : {
2065 0 : LOCK(m_peer_mutex);
2066 0 : for(auto& it : m_peer_map) {
2067 0 : Peer& peer = *it.second;
2068 0 : auto tx_relay = peer.GetTxRelay();
2069 0 : if (!tx_relay) continue;
2070 :
2071 0 : LOCK(tx_relay->m_tx_inventory_mutex);
2072 : // Only queue transactions for announcement once the version handshake
2073 : // is completed. The time of arrival for these transactions is
2074 : // otherwise at risk of leaking to a spy, if the spy is able to
2075 : // distinguish transactions received during the handshake from the rest
2076 : // in the announcement.
2077 0 : if (tx_relay->m_next_inv_send_time == 0s) continue;
2078 :
2079 0 : const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
2080 0 : if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
2081 0 : tx_relay->m_tx_inventory_to_send.insert(hash);
2082 0 : }
2083 0 : };
2084 0 : }
2085 :
2086 0 : void PeerManagerImpl::RelayAddress(NodeId originator,
2087 : const CAddress& addr,
2088 : bool fReachable)
2089 : {
2090 : // We choose the same nodes within a given 24h window (if the list of connected
2091 : // nodes does not change) and we don't relay to nodes that already know an
2092 : // address. So within 24h we will likely relay a given address once. This is to
2093 : // prevent a peer from unjustly giving their address better propagation by sending
2094 : // it to us repeatedly.
2095 :
2096 0 : if (!fReachable && !addr.IsRelayable()) return;
2097 :
2098 : // Relay to a limited number of other nodes
2099 : // Use deterministic randomness to send to the same nodes for 24 hours
2100 : // at a time so the m_addr_knowns of the chosen nodes prevent repeats
2101 0 : const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
2102 0 : const auto current_time{GetTime<std::chrono::seconds>()};
2103 : // Adding address hash makes exact rotation time different per address, while preserving periodicity.
2104 0 : const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
2105 0 : const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
2106 0 : .Write(hash_addr)
2107 0 : .Write(time_addr)};
2108 0 : FastRandomContext insecure_rand;
2109 :
2110 : // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
2111 0 : unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
2112 :
2113 0 : std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}};
2114 0 : assert(nRelayNodes <= best.size());
2115 :
2116 0 : LOCK(m_peer_mutex);
2117 :
2118 0 : for (auto& [id, peer] : m_peer_map) {
2119 0 : if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) {
2120 0 : uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
2121 0 : for (unsigned int i = 0; i < nRelayNodes; i++) {
2122 0 : if (hashKey > best[i].first) {
2123 0 : std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
2124 0 : best[i] = std::make_pair(hashKey, peer.get());
2125 0 : break;
2126 : }
2127 0 : }
2128 0 : }
2129 : };
2130 :
2131 0 : for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
2132 0 : PushAddress(*best[i].second, addr, insecure_rand);
2133 0 : }
2134 0 : }
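// Editor's note (illustrative, not part of the original source): a worked
// example of the deterministic selection above. time_addr buckets the current
// time into windows of ROTATE_ADDR_RELAY_DEST_INTERVAL (24h in the current
// source; treated as an assumption), offset per address by hash_addr, so the
// same (address, day-bucket) pair always seeds the same SipHasher. Each
// eligible peer is then scored with CSipHasher(hasher).Write(id).Finalize(),
// and the one or two highest-scoring peers receive the address. As long as
// the peer set is stable, repeated announcements of the same address within
// the window go to the same peers, whose m_addr_known filters suppress the
// duplicates.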
2135 :
2136 0 : void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv)
2137 : {
2138 0 : std::shared_ptr<const CBlock> a_recent_block;
2139 0 : std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
2140 : {
2141 0 : LOCK(m_most_recent_block_mutex);
2142 0 : a_recent_block = m_most_recent_block;
2143 0 : a_recent_compact_block = m_most_recent_compact_block;
2144 0 : }
2145 :
2146 0 : bool need_activate_chain = false;
2147 : {
2148 0 : LOCK(cs_main);
2149 0 : const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
2150 0 : if (pindex) {
2151 0 : if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
2152 0 : pindex->IsValid(BLOCK_VALID_TREE)) {
2153 : // If we have the block and all of its parents, but have not yet validated it,
2154 : // we might be in the middle of connecting it (ie in the unlock of cs_main
2155 : // before ActivateBestChain but after AcceptBlock).
2156 : // In this case, we need to run ActivateBestChain prior to checking the relay
2157 : // conditions below.
2158 0 : need_activate_chain = true;
2159 0 : }
2160 0 : }
2161 0 : } // release cs_main before calling ActivateBestChain
2162 0 : if (need_activate_chain) {
2163 0 : BlockValidationState state;
2164 0 : if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
2165 0 : LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
2166 0 : }
2167 0 : }
2168 :
2169 0 : LOCK(cs_main);
2170 0 : const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
2171 0 : if (!pindex) {
2172 0 : return;
2173 : }
2174 0 : if (!BlockRequestAllowed(pindex)) {
2175 0 : LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId());
2176 0 : return;
2177 : }
2178 0 : const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2179 : // disconnect node in case we have reached the outbound limit for serving historical blocks
2180 0 : if (m_connman.OutboundTargetReached(true) &&
2181 0 : (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) &&
2182 0 : !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target
2183 : ) {
2184 0 : LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId());
2185 0 : pfrom.fDisconnect = true;
2186 0 : return;
2187 : }
2188 : // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
2189 0 : if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && (
2190 0 : (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
2191 : )) {
2192 0 : LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId());
2193 : //disconnect node and prevent it from stalling (would otherwise wait for the missing block)
2194 0 : pfrom.fDisconnect = true;
2195 0 : return;
2196 : }
2197 : // Pruned nodes may have deleted the block, so check whether
2198 : // it's available before trying to send.
2199 0 : if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
2200 0 : return;
2201 : }
2202 0 : std::shared_ptr<const CBlock> pblock;
2203 0 : if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
2204 0 : pblock = a_recent_block;
2205 0 : } else if (inv.IsMsgWitnessBlk()) {
2206 : // Fast-path: in this case it is possible to serve the block directly from disk,
2207 : // as the network format matches the format on disk
2208 0 : std::vector<uint8_t> block_data;
2209 0 : if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, pindex->GetBlockPos())) {
2210 0 : assert(!"cannot load block from disk");
2211 : }
2212 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, Span{block_data}));
2213 : // Don't set pblock as we've sent the block
2214 0 : } else {
2215 : // Send block from disk
2216 0 : std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
2217 0 : if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, *pindex)) {
2218 0 : assert(!"cannot load block from disk");
2219 : }
2220 0 : pblock = pblockRead;
2221 0 : }
2222 0 : if (pblock) {
2223 0 : if (inv.IsMsgBlk()) {
2224 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
2225 0 : } else if (inv.IsMsgWitnessBlk()) {
2226 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
2227 0 : } else if (inv.IsMsgFilteredBlk()) {
2228 0 : bool sendMerkleBlock = false;
2229 0 : CMerkleBlock merkleBlock;
2230 0 : if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) {
2231 0 : LOCK(tx_relay->m_bloom_filter_mutex);
2232 0 : if (tx_relay->m_bloom_filter) {
2233 0 : sendMerkleBlock = true;
2234 0 : merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
2235 0 : }
2236 0 : }
2237 0 : if (sendMerkleBlock) {
2238 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
2239 : // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
2240 : // This avoids hurting performance by pointlessly requiring a round-trip
2241 : // Note that there is currently no way for a node to request any single transactions we didn't send here -
2242 : // they must either disconnect and retry or request the full block.
2243 :                     // Thus, the protocol spec allows us to provide duplicate txn here;
2244 :                     // however, we MUST always provide at least what the remote peer needs.
2245 : typedef std::pair<unsigned int, uint256> PairType;
2246 0 : for (PairType& pair : merkleBlock.vMatchedTxn)
2247 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
2248 0 : }
2249 : // else
2250 : // no response
2251 0 : } else if (inv.IsMsgCmpctBlk()) {
2252 : // If a peer is asking for old blocks, we're almost guaranteed
2253 : // they won't have a useful mempool to match against a compact block,
2254 : // and we don't feel like constructing the object for them, so
2255 : // instead we respond with the full, non-compact block.
2256 0 : if (CanDirectFetch() && pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
2257 0 : if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
2258 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
2259 0 : } else {
2260 0 : CBlockHeaderAndShortTxIDs cmpctblock{*pblock};
2261 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
2262 0 : }
2263 0 : } else {
2264 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
2265 : }
2266 0 : }
2267 0 : }
2268 :
2269 : {
2270 0 : LOCK(peer.m_block_inv_mutex);
2271 : // Trigger the peer node to send a getblocks request for the next batch of inventory
2272 0 : if (inv.hash == peer.m_continuation_block) {
2273 : // Send immediately. This must send even if redundant,
2274 : // and we want it right after the last block so they don't
2275 : // wait for other stuff first.
2276 0 : std::vector<CInv> vInv;
2277 0 : vInv.push_back(CInv(MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash()));
2278 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
2279 0 : peer.m_continuation_block.SetNull();
2280 0 : }
2281 0 : }
2282 0 : }
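// Summary of the block-serving logic above: depending on the inv type, the block is
// sent as a plain BLOCK message (without witness data for MSG_BLOCK, with witness data
// for MSG_WITNESS_BLOCK, the latter streamed straight from disk), as a MERKLEBLOCK
// filtered through the peer's BIP37 bloom filter plus the matched transactions, or as a
// CMPCTBLOCK when the requested block is recent enough (falling back to a full block
// otherwise). Finally, if the served hash matches peer.m_continuation_block, a single
// INV for our tip is pushed so the peer issues its next getblocks request.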
2283 :
2284 0 : CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid)
2285 : {
2286 : // If a tx was in the mempool prior to the last INV for this peer, permit the request.
2287 0 : auto txinfo = m_mempool.info_for_relay(gtxid, tx_relay.m_last_inv_sequence);
2288 0 : if (txinfo.tx) {
2289 0 : return std::move(txinfo.tx);
2290 : }
2291 :
2292 : // Or it might be from the most recent block
2293 : {
2294 0 : LOCK(m_most_recent_block_mutex);
2295 0 : if (m_most_recent_block_txs != nullptr) {
2296 0 : auto it = m_most_recent_block_txs->find(gtxid.GetHash());
2297 0 : if (it != m_most_recent_block_txs->end()) return it->second;
2298 0 : }
2299 0 : }
2300 :
2301 0 : return {};
2302 0 : }
2303 :
2304 0 : void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
2305 : {
2306 0 : AssertLockNotHeld(cs_main);
2307 :
2308 0 : auto tx_relay = peer.GetTxRelay();
2309 :
2310 0 : std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
2311 0 : std::vector<CInv> vNotFound;
2312 0 : const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2313 :
2314 : // Process as many TX items from the front of the getdata queue as
2315 : // possible, since they're common and it's efficient to batch process
2316 : // them.
2317 0 : while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
2318 0 : if (interruptMsgProc) return;
2319 : // The send buffer provides backpressure. If there's no space in
2320 : // the buffer, pause processing until the next call.
2321 0 : if (pfrom.fPauseSend) break;
2322 :
2323 0 : const CInv &inv = *it++;
2324 :
2325 0 : if (tx_relay == nullptr) {
2326 : // Ignore GETDATA requests for transactions from block-relay-only
2327 : // peers and peers that asked us not to announce transactions.
2328 0 : continue;
2329 : }
2330 :
2331 0 : CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv));
2332 0 : if (tx) {
2333 : // WTX and WITNESS_TX imply we serialize with witness
2334 0 : int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
2335 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
2336 0 : m_mempool.RemoveUnbroadcastTx(tx->GetHash());
2337 0 : } else {
2338 0 : vNotFound.push_back(inv);
2339 : }
2340 0 : }
2341 :
2342 : // Only process one BLOCK item per call, since they're uncommon and can be
2343 : // expensive to process.
2344 0 : if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
2345 0 : const CInv &inv = *it++;
2346 0 : if (inv.IsGenBlkMsg()) {
2347 0 : ProcessGetBlockData(pfrom, peer, inv);
2348 0 : }
2349 : // else: If the first item on the queue is an unknown type, we erase it
2350 : // and continue processing the queue on the next call.
2351 0 : }
2352 :
2353 0 : peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
2354 :
2355 0 : if (!vNotFound.empty()) {
2356 : // Let the peer know that we didn't find what it asked for, so it doesn't
2357 : // have to wait around forever.
2358 : // SPV clients care about this message: it's needed when they are
2359 : // recursively walking the dependencies of relevant unconfirmed
2360 : // transactions. SPV clients want to do that because they want to know
2361 : // about (and store and rebroadcast and risk analyze) the dependencies
2362 : // of transactions relevant to them, without having to download the
2363 : // entire memory pool.
2364 : // Also, other nodes can use these messages to automatically request a
2365 :         // transaction from some other peer that announced it, and stop
2366 : // waiting for us to respond.
2367 : // In normal operation, we often send NOTFOUND messages for parents of
2368 : // transactions that we relay; if a peer is missing a parent, they may
2369 : // assume we have them and request the parents from us.
2370 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
2371 0 : }
2372 0 : }
2373 :
2374 0 : uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
2375 : {
2376 0 : uint32_t nFetchFlags = 0;
2377 0 : if (CanServeWitnesses(peer)) {
2378 0 : nFetchFlags |= MSG_WITNESS_FLAG;
2379 0 : }
2380 0 : return nFetchFlags;
2381 : }
2382 :
2383 0 : void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
2384 : {
2385 0 : BlockTransactions resp(req);
2386 0 : for (size_t i = 0; i < req.indexes.size(); i++) {
2387 0 : if (req.indexes[i] >= block.vtx.size()) {
2388 0 : Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
2389 0 : return;
2390 : }
2391 0 : resp.txn[i] = block.vtx[req.indexes[i]];
2392 0 : }
2393 :
2394 0 : const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2395 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
2396 0 : }
2397 :
2398 0 : bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
2399 : {
2400 : // Do these headers have proof-of-work matching what's claimed?
2401 0 : if (!HasValidProofOfWork(headers, consensusParams)) {
2402 0 : Misbehaving(peer, 100, "header with invalid proof of work");
2403 0 : return false;
2404 : }
2405 :
2406 : // Are these headers connected to each other?
2407 0 : if (!CheckHeadersAreContinuous(headers)) {
2408 0 : Misbehaving(peer, 20, "non-continuous headers sequence");
2409 0 : return false;
2410 : }
2411 0 : return true;
2412 0 : }
2413 :
2414 0 : arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
2415 : {
2416 0 : arith_uint256 near_chaintip_work = 0;
2417 0 : LOCK(cs_main);
2418 0 : if (m_chainman.ActiveChain().Tip() != nullptr) {
2419 0 : const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
2420 : // Use a 144 block buffer, so that we'll accept headers that fork from
2421 : // near our tip.
2422 0 : near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
2423 0 : }
2424 0 : return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
2425 0 : }
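// For illustration: with T = tip->nChainWork and p = GetBlockProof(*tip), the value
// returned above is
//
//     max(T - min(144 * p, T), MinimumChainWork())
//
// so a peer's headers chain is acceptable if it reaches to within roughly 144 blocks'
// worth of work of our tip (about one day at the 10-minute target spacing), and in any
// case must meet the minimum chain work. If there is no tip yet, only the minimum chain
// work applies.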
2426 :
2427 : /**
2428 : * Special handling for unconnecting headers that might be part of a block
2429 : * announcement.
2430 : *
2431 : * We'll send a getheaders message in response to try to connect the chain.
2432 : *
2433 : * The peer can send up to MAX_NUM_UNCONNECTING_HEADERS_MSGS in a row that
2434 :  * don't connect before being given DoS points.
2435 : *
2436 : * Once a headers message is received that is valid and does connect,
2437 : * m_num_unconnecting_headers_msgs gets reset back to 0.
2438 : */
2439 0 : void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
2440 : const std::vector<CBlockHeader>& headers)
2441 : {
2442 0 : peer.m_num_unconnecting_headers_msgs++;
2443 : // Try to fill in the missing headers.
2444 0 : const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
2445 0 : if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
2446 0 : LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
2447 : headers[0].GetHash().ToString(),
2448 : headers[0].hashPrevBlock.ToString(),
2449 : best_header->nHeight,
2450 : pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
2451 0 : }
2452 :
2453 : // Set hashLastUnknownBlock for this peer, so that if we
2454 : // eventually get the headers - even from a different peer -
2455 : // we can use this peer to download.
2456 0 : WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
2457 :
2458 : // The peer may just be broken, so periodically assign DoS points if this
2459 : // condition persists.
2460 0 : if (peer.m_num_unconnecting_headers_msgs % MAX_NUM_UNCONNECTING_HEADERS_MSGS == 0) {
2461 0 : Misbehaving(peer, 20, strprintf("%d non-connecting headers", peer.m_num_unconnecting_headers_msgs));
2462 0 : }
2463 0 : }
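// For illustration: every unconnecting-headers message from a peer triggers a getheaders
// retry and a block-availability update, but DoS points are only assigned on every
// MAX_NUM_UNCONNECTING_HEADERS_MSGS-th occurrence. For example, if that constant were
// 10, the peer would receive 20 points after its 10th, 20th, 30th, ... consecutive
// unconnecting message.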
2464 :
2465 0 : bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
2466 : {
2467 0 : uint256 hashLastBlock;
2468 0 : for (const CBlockHeader& header : headers) {
2469 0 : if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
2470 0 : return false;
2471 : }
2472 0 : hashLastBlock = header.GetHash();
2473 : }
2474 0 : return true;
2475 0 : }
2476 :
2477 0 : bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
2478 : {
2479 0 : if (peer.m_headers_sync) {
2480 0 : auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
2481 0 : if (result.request_more) {
2482 0 : auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
2483 : // If we were instructed to ask for a locator, it should not be empty.
2484 0 : Assume(!locator.vHave.empty());
2485 0 : if (!locator.vHave.empty()) {
2486 : // It should be impossible for the getheaders request to fail,
2487 : // because we should have cleared the last getheaders timestamp
2488 : // when processing the headers that triggered this call. But
2489 : // it may be possible to bypass this via compactblock
2490 : // processing, so check the result before logging just to be
2491 : // safe.
2492 0 : bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
2493 0 : if (sent_getheaders) {
2494 0 : LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
2495 : locator.vHave.front().ToString(), pfrom.GetId());
2496 0 : } else {
2497 0 : LogPrint(BCLog::NET, "error sending next getheaders (from %s) to continue sync with peer=%d\n",
2498 : locator.vHave.front().ToString(), pfrom.GetId());
2499 : }
2500 0 : }
2501 0 : }
2502 :
2503 0 : if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
2504 0 : peer.m_headers_sync.reset(nullptr);
2505 :
2506 : // Delete this peer's entry in m_headers_presync_stats.
2507 : // If this is m_headers_presync_bestpeer, it will be replaced later
2508 : // by the next peer that triggers the else{} branch below.
2509 0 : LOCK(m_headers_presync_mutex);
2510 0 : m_headers_presync_stats.erase(pfrom.GetId());
2511 0 : } else {
2512 : // Build statistics for this peer's sync.
2513 0 : HeadersPresyncStats stats;
2514 0 : stats.first = peer.m_headers_sync->GetPresyncWork();
2515 0 : if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) {
2516 0 : stats.second = {peer.m_headers_sync->GetPresyncHeight(),
2517 0 : peer.m_headers_sync->GetPresyncTime()};
2518 0 : }
2519 :
2520 :             // Store the updated statistics for this peer.
2521 0 : LOCK(m_headers_presync_mutex);
2522 0 : m_headers_presync_stats[pfrom.GetId()] = stats;
2523 0 : auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
2524 0 : bool best_updated = false;
2525 0 : if (best_it == m_headers_presync_stats.end()) {
2526 : // If the cached best peer is outdated, iterate over all remaining ones (including
2527 :                 // the newly updated one) to find the best one.
2528 0 : NodeId peer_best{-1};
2529 0 : const HeadersPresyncStats* stat_best{nullptr};
2530 0 : for (const auto& [peer, stat] : m_headers_presync_stats) {
2531 0 : if (!stat_best || stat > *stat_best) {
2532 0 : peer_best = peer;
2533 0 : stat_best = &stat;
2534 0 : }
2535 : }
2536 0 : m_headers_presync_bestpeer = peer_best;
2537 0 : best_updated = (peer_best == pfrom.GetId());
2538 0 : } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
2539 : // pfrom was and remains the best peer, or pfrom just became best.
2540 0 : m_headers_presync_bestpeer = pfrom.GetId();
2541 0 : best_updated = true;
2542 0 : }
2543 0 : if (best_updated && stats.second.has_value()) {
2544 : // If the best peer updated, and it is in its first phase, signal.
2545 0 : m_headers_presync_should_signal = true;
2546 0 : }
2547 0 : }
2548 :
2549 0 : if (result.success) {
2550 : // We only overwrite the headers passed in if processing was
2551 : // successful.
2552 0 : headers.swap(result.pow_validated_headers);
2553 0 : }
2554 :
2555 0 : return result.success;
2556 0 : }
2557 : // Either we didn't have a sync in progress, or something went wrong
2558 : // processing these headers, or we are returning headers to the caller to
2559 : // process.
2560 0 : return false;
2561 0 : }
2562 :
2563 0 : bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
2564 : {
2565 : // Calculate the total work on this chain.
2566 0 : arith_uint256 total_work = chain_start_header->nChainWork + CalculateHeadersWork(headers);
2567 :
2568 : // Our dynamic anti-DoS threshold (minimum work required on a headers chain
2569 : // before we'll store it)
2570 0 : arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
2571 :
2572 : // Avoid DoS via low-difficulty-headers by only processing if the headers
2573 : // are part of a chain with sufficient work.
2574 0 : if (total_work < minimum_chain_work) {
2575 : // Only try to sync with this peer if their headers message was full;
2576 : // otherwise they don't have more headers after this so no point in
2577 : // trying to sync their too-little-work chain.
2578 0 : if (headers.size() == MAX_HEADERS_RESULTS) {
2579 : // Note: we could advance to the last header in this set that is
2580 : // known to us, rather than starting at the first header (which we
2581 : // may already have); however this is unlikely to matter much since
2582 : // ProcessHeadersMessage() already handles the case where all
2583 : // headers in a received message are already known and are
2584 : // ancestors of m_best_header or chainActive.Tip(), by skipping
2585 : // this logic in that case. So even if the first header in this set
2586 : // of headers is known, some header in this set must be new, so
2587 :             // advancing to the first unknown header would have only a small effect.
2588 0 : LOCK(peer.m_headers_sync_mutex);
2589 0 : peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
2590 0 : chain_start_header, minimum_chain_work));
2591 :
2592 :             // Now that a HeadersSyncState object for tracking this synchronization
2593 :             // has been created, process the headers using it as normal. Failures are
2594 : // handled inside of IsContinuationOfLowWorkHeadersSync.
2595 0 : (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
2596 0 : } else {
2597 0 : LogPrint(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
2598 : }
2599 :
2600 : // The peer has not yet given us a chain that meets our work threshold,
2601 : // so we want to prevent further processing of the headers in any case.
2602 0 : headers = {};
2603 0 : return true;
2604 : }
2605 :
2606 0 : return false;
2607 0 : }
2608 :
2609 0 : bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
2610 : {
2611 0 : if (header == nullptr) {
2612 0 : return false;
2613 0 : } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {
2614 0 : return true;
2615 0 : } else if (m_chainman.ActiveChain().Contains(header)) {
2616 0 : return true;
2617 : }
2618 0 : return false;
2619 0 : }
2620 :
2621 0 : bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
2622 : {
2623 0 : const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2624 :
2625 0 : const auto current_time = NodeClock::now();
2626 :
2627 : // Only allow a new getheaders message to go out if we don't have a recent
2628 : // one already in-flight
2629 0 : if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) {
2630 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
2631 0 : peer.m_last_getheaders_timestamp = current_time;
2632 0 : return true;
2633 : }
2634 0 : return false;
2635 0 : }
2636 :
2637 : /*
2638 : * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
2639 : * We require that the given tip have at least as much work as our tip, and for
2640 : * our current tip to be "close to synced" (see CanDirectFetch()).
2641 : */
2642 0 : void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
2643 : {
2644 0 : const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2645 :
2646 0 : LOCK(cs_main);
2647 0 : CNodeState *nodestate = State(pfrom.GetId());
2648 :
2649 0 : if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
2650 0 : std::vector<const CBlockIndex*> vToFetch;
2651 0 : const CBlockIndex* pindexWalk{&last_header};
2652 : // Calculate all the blocks we'd need to switch to last_header, up to a limit.
2653 0 : while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2654 0 : if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
2655 0 : !IsBlockRequested(pindexWalk->GetBlockHash()) &&
2656 0 : (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) {
2657 : // We don't have this block, and it's not yet in flight.
2658 0 : vToFetch.push_back(pindexWalk);
2659 0 : }
2660 0 : pindexWalk = pindexWalk->pprev;
2661 : }
2662 : // If pindexWalk still isn't on our main chain, we're looking at a
2663 : // very large reorg at a time we think we're close to caught up to
2664 : // the main chain -- this shouldn't really happen. Bail out on the
2665 : // direct fetch and rely on parallel download instead.
2666 0 : if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
2667 0 : LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
2668 : last_header.GetBlockHash().ToString(),
2669 : last_header.nHeight);
2670 0 : } else {
2671 0 : std::vector<CInv> vGetData;
2672 : // Download as much as possible, from earliest to latest.
2673 0 : for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
2674 0 : if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2675 : // Can't download any more from this peer
2676 0 : break;
2677 : }
2678 0 : uint32_t nFetchFlags = GetFetchFlags(peer);
2679 0 : vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
2680 0 : BlockRequested(pfrom.GetId(), *pindex);
2681 0 : LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
2682 : pindex->GetBlockHash().ToString(), pfrom.GetId());
2683 : }
2684 0 : if (vGetData.size() > 1) {
2685 0 : LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
2686 : last_header.GetBlockHash().ToString(),
2687 : last_header.nHeight);
2688 0 : }
2689 0 : if (vGetData.size() > 0) {
2690 0 : if (!m_opts.ignore_incoming_txs &&
2691 0 : nodestate->m_provides_cmpctblocks &&
2692 0 : vGetData.size() == 1 &&
2693 0 : mapBlocksInFlight.size() == 1 &&
2694 0 : last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
2695 : // In any case, we want to download using a compact block, not a regular one
2696 0 : vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
2697 0 : }
2698 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
2699 0 : }
2700 0 : }
2701 0 : }
2702 0 : }
2703 :
2704 : /**
2705 : * Given receipt of headers from a peer ending in last_header, along with
2706 : * whether that header was new and whether the headers message was full,
2707 : * update the state we keep for the peer.
2708 : */
2709 0 : void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
2710 : const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
2711 : {
2712 0 : if (peer.m_num_unconnecting_headers_msgs > 0) {
2713 0 : LogPrint(BCLog::NET, "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n", pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
2714 0 : }
2715 0 : peer.m_num_unconnecting_headers_msgs = 0;
2716 :
2717 0 : LOCK(cs_main);
2718 0 : CNodeState *nodestate = State(pfrom.GetId());
2719 :
2720 0 : UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());
2721 :
2722 : // From here, pindexBestKnownBlock should be guaranteed to be non-null,
2723 : // because it is set in UpdateBlockAvailability. Some nullptr checks
2724 : // are still present, however, as belt-and-suspenders.
2725 :
2726 0 : if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
2727 0 : nodestate->m_last_block_announcement = GetTime();
2728 0 : }
2729 :
2730 : // If we're in IBD, we want outbound peers that will serve us a useful
2731 : // chain. Disconnect peers that are on chains with insufficient work.
2732 0 : if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
2733 : // If the peer has no more headers to give us, then we know we have
2734 : // their tip.
2735 0 : if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
2736 : // This peer has too little work on their headers chain to help
2737 : // us sync -- disconnect if it is an outbound disconnection
2738 : // candidate.
2739 : // Note: We compare their tip to the minimum chain work (rather than
2740 : // m_chainman.ActiveChain().Tip()) because we won't start block download
2741 : // until we have a headers chain that has at least
2742 : // the minimum chain work, even if a peer has a chain past our tip,
2743 : // as an anti-DoS measure.
2744 0 : if (pfrom.IsOutboundOrBlockRelayConn()) {
2745 0 : LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
2746 0 : pfrom.fDisconnect = true;
2747 0 : }
2748 0 : }
2749 0 : }
2750 :
2751 : // If this is an outbound full-relay peer, check to see if we should protect
2752 : // it from the bad/lagging chain logic.
2753 : // Note that outbound block-relay peers are excluded from this protection, and
2754 : // thus always subject to eviction under the bad/lagging chain logic.
2755 : // See ChainSyncTimeoutState.
2756 0 : if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
2757 0 : if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
2758 0 : LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
2759 0 : nodestate->m_chain_sync.m_protect = true;
2760 0 : ++m_outbound_peers_with_protect_from_disconnect;
2761 0 : }
2762 0 : }
2763 0 : }
2764 :
2765 0 : void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
2766 : std::vector<CBlockHeader>&& headers,
2767 : bool via_compact_block)
2768 : {
2769 0 : size_t nCount = headers.size();
2770 :
2771 0 : if (nCount == 0) {
2772 :         // Nothing interesting. Stop asking this peer for more headers.
2773 : // If we were in the middle of headers sync, receiving an empty headers
2774 : // message suggests that the peer suddenly has nothing to give us
2775 : // (perhaps it reorged to our chain). Clear download state for this peer.
2776 0 : LOCK(peer.m_headers_sync_mutex);
2777 0 : if (peer.m_headers_sync) {
2778 0 : peer.m_headers_sync.reset(nullptr);
2779 0 : LOCK(m_headers_presync_mutex);
2780 0 : m_headers_presync_stats.erase(pfrom.GetId());
2781 0 : }
2782 : return;
2783 0 : }
2784 :
2785 : // Before we do any processing, make sure these pass basic sanity checks.
2786 : // We'll rely on headers having valid proof-of-work further down, as an
2787 : // anti-DoS criteria (note: this check is required before passing any
2788 : // headers into HeadersSyncState).
2789 0 : if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
2790 : // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
2791 : // just return. (Note that even if a header is announced via compact
2792 : // block, the header itself should be valid, so this type of error can
2793 : // always be punished.)
2794 0 : return;
2795 : }
2796 :
2797 0 : const CBlockIndex *pindexLast = nullptr;
2798 :
2799 : // We'll set already_validated_work to true if these headers are
2800 : // successfully processed as part of a low-work headers sync in progress
2801 : // (either in PRESYNC or REDOWNLOAD phase).
2802 : // If true, this will mean that any headers returned to us (ie during
2803 : // REDOWNLOAD) can be validated without further anti-DoS checks.
2804 0 : bool already_validated_work = false;
2805 :
2806 : // If we're in the middle of headers sync, let it do its magic.
2807 0 : bool have_headers_sync = false;
2808 : {
2809 0 : LOCK(peer.m_headers_sync_mutex);
2810 :
2811 0 : already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
2812 :
2813 : // The headers we passed in may have been:
2814 : // - untouched, perhaps if no headers-sync was in progress, or some
2815 : // failure occurred
2816 : // - erased, such as if the headers were successfully processed and no
2817 : // additional headers processing needs to take place (such as if we
2818 : // are still in PRESYNC)
2819 : // - replaced with headers that are now ready for validation, such as
2820 : // during the REDOWNLOAD phase of a low-work headers sync.
2821 : // So just check whether we still have headers that we need to process,
2822 : // or not.
2823 0 : if (headers.empty()) {
2824 0 : return;
2825 : }
2826 :
2827 0 : have_headers_sync = !!peer.m_headers_sync;
2828 0 : }
2829 :
2830 : // Do these headers connect to something in our block index?
2831 0 : const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};
2832 0 : bool headers_connect_blockindex{chain_start_header != nullptr};
2833 :
2834 0 : if (!headers_connect_blockindex) {
2835 0 : if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
2836 : // If this looks like it could be a BIP 130 block announcement, use
2837 : // special logic for handling headers that don't connect, as this
2838 : // could be benign.
2839 0 : HandleFewUnconnectingHeaders(pfrom, peer, headers);
2840 0 : } else {
2841 0 : Misbehaving(peer, 10, "invalid header received");
2842 : }
2843 0 : return;
2844 : }
2845 :
2846 : // If the headers we received are already in memory and an ancestor of
2847 : // m_best_header or our tip, skip anti-DoS checks. These headers will not
2848 : // use any more memory (and we are not leaking information that could be
2849 : // used to fingerprint us).
2850 0 : const CBlockIndex *last_received_header{nullptr};
2851 : {
2852 0 : LOCK(cs_main);
2853 0 : last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
2854 0 : if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
2855 0 : already_validated_work = true;
2856 0 : }
2857 0 : }
2858 :
2859 : // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
2860 : // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
2861 : // on startup).
2862 0 : if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
2863 0 : already_validated_work = true;
2864 0 : }
2865 :
2866 : // At this point, the headers connect to something in our block index.
2867 : // Do anti-DoS checks to determine if we should process or store for later
2868 : // processing.
2869 0 : if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
2870 0 : chain_start_header, headers)) {
2871 : // If we successfully started a low-work headers sync, then there
2872 : // should be no headers to process any further.
2873 0 : Assume(headers.empty());
2874 0 : return;
2875 : }
2876 :
2877 : // At this point, we have a set of headers with sufficient work on them
2878 : // which can be processed.
2879 :
2880 : // If we don't have the last header, then this peer will have given us
2881 : // something new (if these headers are valid).
2882 0 : bool received_new_header{last_received_header == nullptr};
2883 :
2884 : // Now process all the headers.
2885 0 : BlockValidationState state;
2886 0 : if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state, &pindexLast)) {
2887 0 : if (state.IsInvalid()) {
2888 0 : MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
2889 0 : return;
2890 : }
2891 0 : }
2892 0 : assert(pindexLast);
2893 :
2894 : // Consider fetching more headers if we are not using our headers-sync mechanism.
2895 0 : if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
2896 : // Headers message had its maximum size; the peer may have more headers.
2897 0 : if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
2898 0 : LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
2899 : pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
2900 0 : }
2901 0 : }
2902 :
2903 0 : UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
2904 :
2905 : // Consider immediately downloading blocks.
2906 0 : HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
2907 :
2908 0 : return;
2909 0 : }
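// Outline of ProcessHeadersMessage() above: an empty message clears any in-progress
// headers sync; cheap proof-of-work and continuity checks run before anything else; an
// in-progress low-work headers sync gets the headers next; headers that do not connect
// to our block index are handled as a possible BIP 130 announcement (or punished); the
// anti-DoS work threshold may start a new low-work headers sync; and only then are the
// headers validated via ProcessNewBlockHeaders(), followed by requesting more headers if
// the message was full, updating per-peer state, and considering direct block fetches.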
2910 :
2911 0 : bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
2912 : {
2913 0 : AssertLockHeld(g_msgproc_mutex);
2914 0 : LOCK(cs_main);
2915 :
2916 0 : CTransactionRef porphanTx = nullptr;
2917 :
2918 0 : while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id)) {
2919 0 : const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
2920 0 : const TxValidationState& state = result.m_state;
2921 0 : const uint256& orphanHash = porphanTx->GetHash();
2922 0 : const uint256& orphan_wtxid = porphanTx->GetWitnessHash();
2923 :
2924 0 : if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
2925 0 : LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
2926 0 : LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
2927 : peer.m_id,
2928 : orphanHash.ToString(),
2929 : orphan_wtxid.ToString(),
2930 : m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
2931 0 : RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
2932 0 : m_orphanage.AddChildrenToWorkSet(*porphanTx);
2933 0 : m_orphanage.EraseTx(orphanHash);
2934 0 : for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
2935 0 : AddToCompactExtraTransactions(removedTx);
2936 : }
2937 0 : return true;
2938 0 : } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
2939 0 : if (state.IsInvalid()) {
2940 0 : LogPrint(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n",
2941 : orphanHash.ToString(),
2942 : orphan_wtxid.ToString(),
2943 : peer.m_id,
2944 : state.ToString());
2945 0 : LogPrint(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
2946 : orphanHash.ToString(),
2947 : orphan_wtxid.ToString(),
2948 : peer.m_id,
2949 : state.ToString());
2950 : // Maybe punish peer that gave us an invalid orphan tx
2951 0 : MaybePunishNodeForTx(peer.m_id, state);
2952 0 : }
2953 : // Has inputs but not accepted to mempool
2954 : // Probably non-standard or insufficient fee
2955 0 : LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString());
2956 0 : if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
2957 : // We can add the wtxid of this transaction to our reject filter.
2958 : // Do not add txids of witness transactions or witness-stripped
2959 : // transactions to the filter, as they can have been malleated;
2960 : // adding such txids to the reject filter would potentially
2961 : // interfere with relay of valid transactions from peers that
2962 : // do not support wtxid-based relay. See
2963 : // https://github.com/bitcoin/bitcoin/issues/8279 for details.
2964 : // We can remove this restriction (and always add wtxids to
2965 : // the filter even for witness stripped transactions) once
2966 : // wtxid-based relay is broadly deployed.
2967 : // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
2968 : // for concerns around weakening security of unupgraded nodes
2969 : // if we start doing this too early.
2970 0 : m_recent_rejects.insert(porphanTx->GetWitnessHash());
2971 : // If the transaction failed for TX_INPUTS_NOT_STANDARD,
2972 : // then we know that the witness was irrelevant to the policy
2973 : // failure, since this check depends only on the txid
2974 : // (the scriptPubKey being spent is covered by the txid).
2975 : // Add the txid to the reject filter to prevent repeated
2976 : // processing of this transaction in the event that child
2977 : // transactions are later received (resulting in
2978 : // parent-fetching by txid via the orphan-handling logic).
2979 0 : if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
2980 : // We only add the txid if it differs from the wtxid, to
2981 : // avoid wasting entries in the rolling bloom filter.
2982 0 : m_recent_rejects.insert(porphanTx->GetHash());
2983 0 : }
2984 0 : }
2985 0 : m_orphanage.EraseTx(orphanHash);
2986 0 : return true;
2987 : }
2988 0 : }
2989 :
2990 0 : return false;
2991 0 : }
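// As implemented above, ProcessOrphanTx() returns true as soon as a single orphan from
// the peer's work set has been resolved -- either accepted to the mempool or erased
// after a failure other than missing inputs -- and returns false once the work set
// yields no further candidates; orphans that are still missing inputs are simply
// skipped. The boolean presumably lets the caller decide whether more orphan-processing
// work remains for this peer.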
2992 :
2993 0 : bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
2994 : BlockFilterType filter_type, uint32_t start_height,
2995 : const uint256& stop_hash, uint32_t max_height_diff,
2996 : const CBlockIndex*& stop_index,
2997 : BlockFilterIndex*& filter_index)
2998 : {
2999 0 : const bool supported_filter_type =
3000 0 : (filter_type == BlockFilterType::BASIC &&
3001 0 : (peer.m_our_services & NODE_COMPACT_FILTERS));
3002 0 : if (!supported_filter_type) {
3003 0 : LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n",
3004 : node.GetId(), static_cast<uint8_t>(filter_type));
3005 0 : node.fDisconnect = true;
3006 0 : return false;
3007 : }
3008 :
3009 : {
3010 0 : LOCK(cs_main);
3011 0 : stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
3012 :
3013 : // Check that the stop block exists and the peer would be allowed to fetch it.
3014 0 : if (!stop_index || !BlockRequestAllowed(stop_index)) {
3015 0 : LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
3016 : node.GetId(), stop_hash.ToString());
3017 0 : node.fDisconnect = true;
3018 0 : return false;
3019 : }
3020 0 : }
3021 :
3022 0 : uint32_t stop_height = stop_index->nHeight;
3023 0 : if (start_height > stop_height) {
3024 0 : LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with "
3025 : "start height %d and stop height %d\n",
3026 : node.GetId(), start_height, stop_height);
3027 0 : node.fDisconnect = true;
3028 0 : return false;
3029 : }
3030 0 : if (stop_height - start_height >= max_height_diff) {
3031 0 : LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n",
3032 : node.GetId(), stop_height - start_height + 1, max_height_diff);
3033 0 : node.fDisconnect = true;
3034 0 : return false;
3035 : }
3036 :
3037 0 : filter_index = GetBlockFilterIndex(filter_type);
3038 0 : if (!filter_index) {
3039 0 : LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type));
3040 0 : return false;
3041 : }
3042 :
3043 0 : return true;
3044 0 : }
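// PrepareBlockFilterRequest() above is the shared front end for the
// getcfilters/getcfheaders/getcfcheckpt handlers that follow: an unsupported filter
// type, an unknown or disallowed stop block, an inverted height range, or an over-sized
// range all set fDisconnect on the requesting node rather than assigning Misbehaving()
// points, while a missing filter index merely fails the request with a log message.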
3045 :
3046 0 : void PeerManagerImpl::ProcessGetCFilters(CNode& node,Peer& peer, CDataStream& vRecv)
3047 : {
3048 : uint8_t filter_type_ser;
3049 : uint32_t start_height;
3050 0 : uint256 stop_hash;
3051 :
3052 0 : vRecv >> filter_type_ser >> start_height >> stop_hash;
3053 :
3054 0 : const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3055 :
3056 : const CBlockIndex* stop_index;
3057 : BlockFilterIndex* filter_index;
3058 0 : if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3059 : MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
3060 0 : return;
3061 : }
3062 :
3063 0 : std::vector<BlockFilter> filters;
3064 0 : if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
3065 0 : LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3066 : BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3067 0 : return;
3068 : }
3069 :
3070 0 : for (const auto& filter : filters) {
3071 0 : CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
3072 0 : .Make(NetMsgType::CFILTER, filter);
3073 0 : m_connman.PushMessage(&node, std::move(msg));
3074 0 : }
3075 0 : }
3076 :
3077 0 : void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, CDataStream& vRecv)
3078 : {
3079 : uint8_t filter_type_ser;
3080 : uint32_t start_height;
3081 0 : uint256 stop_hash;
3082 :
3083 0 : vRecv >> filter_type_ser >> start_height >> stop_hash;
3084 :
3085 0 : const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3086 :
3087 : const CBlockIndex* stop_index;
3088 : BlockFilterIndex* filter_index;
3089 0 : if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash,
3090 : MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
3091 0 : return;
3092 : }
3093 :
3094 0 : uint256 prev_header;
3095 0 : if (start_height > 0) {
3096 0 : const CBlockIndex* const prev_block =
3097 0 : stop_index->GetAncestor(static_cast<int>(start_height - 1));
3098 0 : if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
3099 0 : LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3100 : BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString());
3101 0 : return;
3102 : }
3103 0 : }
3104 :
3105 0 : std::vector<uint256> filter_hashes;
3106 0 : if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) {
3107 0 : LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n",
3108 : BlockFilterTypeName(filter_type), start_height, stop_hash.ToString());
3109 0 : return;
3110 : }
3111 :
3112 0 : CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
3113 0 : .Make(NetMsgType::CFHEADERS,
3114 : filter_type_ser,
3115 0 : stop_index->GetBlockHash(),
3116 : prev_header,
3117 : filter_hashes);
3118 0 : m_connman.PushMessage(&node, std::move(msg));
3119 0 : }
3120 :
3121 0 : void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& vRecv)
3122 : {
3123 : uint8_t filter_type_ser;
3124 0 : uint256 stop_hash;
3125 :
3126 0 : vRecv >> filter_type_ser >> stop_hash;
3127 :
3128 0 : const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser);
3129 :
3130 : const CBlockIndex* stop_index;
3131 : BlockFilterIndex* filter_index;
3132 0 : if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash,
3133 0 : /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
3134 : stop_index, filter_index)) {
3135 0 : return;
3136 : }
3137 :
3138 0 : std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
3139 :
3140 : // Populate headers.
3141 0 : const CBlockIndex* block_index = stop_index;
3142 0 : for (int i = headers.size() - 1; i >= 0; i--) {
3143 0 : int height = (i + 1) * CFCHECKPT_INTERVAL;
3144 0 : block_index = block_index->GetAncestor(height);
3145 :
3146 0 : if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
3147 0 : LogPrint(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n",
3148 : BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString());
3149 0 : return;
3150 : }
3151 0 : }
3152 :
3153 0 : CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
3154 0 : .Make(NetMsgType::CFCHECKPT,
3155 : filter_type_ser,
3156 0 : stop_index->GetBlockHash(),
3157 : headers);
3158 0 : m_connman.PushMessage(&node, std::move(msg));
3159 0 : }
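// For illustration: the cfcheckpt response built above contains one filter header per
// CFCHECKPT_INTERVAL blocks, at heights CFCHECKPT_INTERVAL, 2 * CFCHECKPT_INTERVAL, ...
// up to (but not above) the stop block's height. For example, if CFCHECKPT_INTERVAL
// were 1000 and the stop block were at height 2500, headers for the blocks at heights
// 1000 and 2000 would be returned.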
3160 :
3161 0 : void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked)
3162 : {
3163 0 : bool new_block{false};
3164 0 : m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
3165 0 : if (new_block) {
3166 0 : node.m_last_block_time = GetTime<std::chrono::seconds>();
3167 : // In case this block came from a different peer than we requested
3168 : // from, we can erase the block request now anyway (as we just stored
3169 : // this block to disk).
3170 0 : LOCK(cs_main);
3171 0 : RemoveBlockRequest(block->GetHash(), std::nullopt);
3172 0 : } else {
3173 0 : LOCK(cs_main);
3174 0 : mapBlockSource.erase(block->GetHash());
3175 0 : }
3176 0 : }
3177 :
3178 0 : void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions)
3179 : {
3180 0 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3181 0 : bool fBlockRead{false};
3182 0 : const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3183 : {
3184 0 : LOCK(cs_main);
3185 :
3186 0 : auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash);
3187 0 : size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
3188 0 : bool requested_block_from_this_peer{false};
3189 :
3190 : // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
3191 0 : bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
3192 :
3193 0 : while (range_flight.first != range_flight.second) {
3194 0 : auto [node_id, block_it] = range_flight.first->second;
3195 0 : if (node_id == pfrom.GetId() && block_it->partialBlock) {
3196 0 : requested_block_from_this_peer = true;
3197 0 : break;
3198 : }
3199 0 : range_flight.first++;
3200 : }
3201 :
3202 0 : if (!requested_block_from_this_peer) {
3203 0 : LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
3204 0 : return;
3205 : }
3206 :
3207 0 : PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
3208 0 : ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);
3209 0 : if (status == READ_STATUS_INVALID) {
3210 0 : RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
3211 0 : Misbehaving(peer, 100, "invalid compact block/non-matching block transactions");
3212 0 : return;
3213 0 : } else if (status == READ_STATUS_FAILED) {
3214 0 : if (first_in_flight) {
3215 : // Might have collided, fall back to getdata now :(
3216 0 : std::vector<CInv> invs;
3217 0 : invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash));
3218 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
3219 0 : } else {
3220 0 : RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
3221 0 : LogPrint(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
3222 0 : return;
3223 : }
3224 0 : } else {
3225 : // Block is either okay, or possibly we received
3226 : // READ_STATUS_CHECKBLOCK_FAILED.
3227 : // Note that CheckBlock can only fail for one of a few reasons:
3228 : // 1. bad-proof-of-work (impossible here, because we've already
3229 : // accepted the header)
3230 : // 2. merkleroot doesn't match the transactions given (already
3231 : // caught in FillBlock with READ_STATUS_FAILED, so
3232 : // impossible here)
3233 : // 3. the block is otherwise invalid (eg invalid coinbase,
3234 : // block is too big, too many legacy sigops, etc).
3235 : // So if CheckBlock failed, #3 is the only possibility.
3236 : // Under BIP 152, we don't discourage the peer unless proof of work is
3237 : // invalid (we don't require all the stateless checks to have
3238 : // been run). This is handled below, so just treat this as
3239 : // though the block was successfully read, and rely on the
3240 : // handling in ProcessNewBlock to ensure the block index is
3241 : // updated, etc.
3242 0 : RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
3243 0 : fBlockRead = true;
3244 : // mapBlockSource is used for potentially punishing peers and
3245 : // updating which peers send us compact blocks, so the race
3246 : // between here and cs_main in ProcessNewBlock is fine.
3247 : // BIP 152 permits peers to relay compact blocks after validating
3248 : // the header only; we should not punish peers if the block turns
3249 : // out to be invalid.
3250 0 : mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false));
3251 : }
3252 0 : } // Don't hold cs_main when we call into ProcessNewBlock
3253 0 : if (fBlockRead) {
3254 : // Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
3255 : // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
3256 : // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
3257 : // disk-space attacks), but this should be safe due to the
3258 : // protections in the compact block handler -- see related comment
3259 : // in compact block optimistic reconstruction handling.
3260 0 : ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
3261 0 : }
3262 0 : return;
3263 0 : }
3264 :
3265 0 : void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
3266 : const std::chrono::microseconds time_received,
3267 : const std::atomic<bool>& interruptMsgProc)
3268 : {
3269 0 : AssertLockHeld(g_msgproc_mutex);
3270 :
3271 0 : LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
3272 :
3273 0 : PeerRef peer = GetPeerRef(pfrom.GetId());
3274 0 : if (peer == nullptr) return;
3275 :
3276 0 : if (msg_type == NetMsgType::VERSION) {
3277 0 : if (pfrom.nVersion != 0) {
3278 0 : LogPrint(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
3279 0 : return;
3280 : }
3281 :
3282 : int64_t nTime;
3283 0 : CService addrMe;
3284 0 : uint64_t nNonce = 1;
3285 : ServiceFlags nServices;
3286 : int nVersion;
3287 0 : std::string cleanSubVer;
3288 0 : int starting_height = -1;
3289 0 : bool fRelay = true;
3290 :
3291 0 : vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
3292 0 : if (nTime < 0) {
3293 0 : nTime = 0;
3294 0 : }
3295 0 : vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer
3296 0 : vRecv >> CNetAddr::V1(addrMe);
3297 0 : if (!pfrom.IsInboundConn())
3298 : {
3299 0 : m_addrman.SetServices(pfrom.addr, nServices);
3300 0 : }
3301 0 : if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
3302 : {
3303 0 : LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
3304 0 : pfrom.fDisconnect = true;
3305 0 : return;
3306 : }
3307 :
3308 0 : if (nVersion < MIN_PEER_PROTO_VERSION) {
3309 : // disconnect from peers older than this proto version
3310 0 : LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion);
3311 0 : pfrom.fDisconnect = true;
3312 0 : return;
3313 : }
3314 :
3315 0 : if (!vRecv.empty()) {
3316 : // The version message includes information about the sending node which we don't use:
3317 : // - 8 bytes (service bits)
3318 : // - 16 bytes (ipv6 address)
3319 : // - 2 bytes (port)
3320 0 : vRecv.ignore(26);
3321 0 : vRecv >> nNonce;
3322 0 : }
3323 0 : if (!vRecv.empty()) {
3324 0 : std::string strSubVer;
3325 0 : vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
3326 0 : cleanSubVer = SanitizeString(strSubVer);
3327 0 : }
3328 0 : if (!vRecv.empty()) {
3329 0 : vRecv >> starting_height;
3330 0 : }
3331 0 : if (!vRecv.empty())
3332 0 : vRecv >> fRelay;
3333 : // Disconnect if we connected to ourself
3334 0 : if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce))
3335 : {
3336 0 : LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort());
3337 0 : pfrom.fDisconnect = true;
3338 0 : return;
3339 : }
3340 :
3341 0 : if (pfrom.IsInboundConn() && addrMe.IsRoutable())
3342 : {
3343 0 : SeenLocal(addrMe);
3344 0 : }
3345 :
3346 : // Inbound peers send us their version message when they connect.
3347 : // We send our version message in response.
3348 0 : if (pfrom.IsInboundConn()) {
3349 0 : PushNodeVersion(pfrom, *peer);
3350 0 : }
3351 :
3352 : // Change version
3353 0 : const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
3354 0 : pfrom.SetCommonVersion(greatest_common_version);
3355 0 : pfrom.nVersion = nVersion;
3356 :
3357 0 : const CNetMsgMaker msg_maker(greatest_common_version);
3358 :
3359 0 : if (greatest_common_version >= WTXID_RELAY_VERSION) {
3360 0 : m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::WTXIDRELAY));
3361 0 : }
3362 :
3363 : // Signal ADDRv2 support (BIP155).
3364 0 : if (greatest_common_version >= 70016) {
3365 : // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
3366 : // implementations reject messages they don't know. As a courtesy, don't send
3367 : // it to nodes with a version before 70016, as no software is known to support
3368 : // BIP155 that doesn't announce at least that protocol version number.
3369 0 : m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
3370 0 : }
3371 :
3372 0 : pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices);
3373 0 : peer->m_their_services = nServices;
3374 0 : pfrom.SetAddrLocal(addrMe);
3375 : {
3376 0 : LOCK(pfrom.m_subver_mutex);
3377 0 : pfrom.cleanSubVer = cleanSubVer;
3378 0 : }
3379 0 : peer->m_starting_height = starting_height;
3380 :
3381 : // Only initialize the Peer::TxRelay m_relay_txs data structure if:
3382 : // - this isn't an outbound block-relay-only connection, and
3383 : // - this isn't an outbound feeler connection, and
3384 : // - fRelay=true (the peer wishes to receive transaction announcements)
3385 : // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that
3386 : // the peer may turn on transaction relay later.
3387 0 : if (!pfrom.IsBlockOnlyConn() &&
3388 0 : !pfrom.IsFeelerConn() &&
3389 0 : (fRelay || (peer->m_our_services & NODE_BLOOM))) {
3390 0 : auto* const tx_relay = peer->SetTxRelay();
3391 : {
3392 0 : LOCK(tx_relay->m_bloom_filter_mutex);
3393 0 : tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message
3394 0 : }
3395 0 : if (fRelay) pfrom.m_relays_txs = true;
3396 0 : }
3397 :
3398 0 : if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) {
3399 : // Per BIP-330, we announce txreconciliation support if:
3400 : // - protocol version per the peer's VERSION message supports WTXID_RELAY;
3401 :         // - transaction relay is supported per the peer's VERSION message;
3402 :         // - this is not a block-relay-only connection and not a feeler;
3403 : // - this is not an addr fetch connection;
3404 : // - we are not in -blocksonly mode.
3405 0 : const auto* tx_relay = peer->GetTxRelay();
3406 0 : if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) &&
3407 0 : !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) {
3408 0 : const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
3409 0 : m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDTXRCNCL,
3410 : TXRECONCILIATION_VERSION, recon_salt));
3411 0 : }
3412 0 : }
3413 :
3414 0 : m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
3415 :
3416 : // Potentially mark this peer as a preferred download peer.
3417 : {
3418 0 : LOCK(cs_main);
3419 0 : CNodeState* state = State(pfrom.GetId());
3420 0 : state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
3421 0 : m_num_preferred_download_peers += state->fPreferredDownload;
3422 0 : }
3423 :
3424 : // Attempt to initialize address relay for outbound peers and use result
3425 : // to decide whether to send GETADDR, so that we don't send it to
3426 : // inbound or outbound block-relay-only peers.
3427 0 : bool send_getaddr{false};
3428 0 : if (!pfrom.IsInboundConn()) {
3429 0 : send_getaddr = SetupAddressRelay(pfrom, *peer);
3430 0 : }
3431 0 : if (send_getaddr) {
3432 : // Do a one-time address fetch to help populate/update our addrman.
3433 : // If we're starting up for the first time, our addrman may be pretty
3434 : // empty, so this mechanism is important to help us connect to the network.
3435 : // We skip this for block-relay-only peers. We want to avoid
3436 : // potentially leaking addr information and we do not want to
3437 : // indicate to the peer that we will participate in addr relay.
3438 0 : m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
3439 0 : peer->m_getaddr_sent = true;
3440 : // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
3441 : // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
3442 0 : peer->m_addr_token_bucket += MAX_ADDR_TO_SEND;
3443 0 : }
3444 :
3445 0 : if (!pfrom.IsInboundConn()) {
3446 : // For non-inbound connections, we update the addrman to record
3447 : // connection success so that addrman will have an up-to-date
3448 : // notion of which peers are online and available.
3449 : //
3450 : // While we strive to not leak information about block-relay-only
3451 : // connections via the addrman, not moving an address to the tried
3452 : // table is also potentially detrimental because new-table entries
3453 : // are subject to eviction in the event of addrman collisions. We
3454 : // mitigate the information-leak by never calling
3455 : // AddrMan::Connected() on block-relay-only peers; see
3456 : // FinalizeNode().
3457 : //
3458 : // This moves an address from New to Tried table in Addrman,
3459 : // resolves tried-table collisions, etc.
3460 0 : m_addrman.Good(pfrom.addr);
3461 0 : }
3462 :
3463 0 : std::string remoteAddr;
3464 0 : if (fLogIPs)
3465 0 : remoteAddr = ", peeraddr=" + pfrom.addr.ToStringAddrPort();
3466 :
3467 0 : const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3468 0 : LogPrint(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n",
3469 : cleanSubVer, pfrom.nVersion,
3470 : peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(),
3471 : remoteAddr, (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
3472 :
3473 0 : int64_t nTimeOffset = nTime - GetTime();
3474 0 : pfrom.nTimeOffset = nTimeOffset;
3475 0 : if (!pfrom.IsInboundConn()) {
3476 : // Don't use timedata samples from inbound peers to make it
3477 : // harder for others to tamper with our adjusted time.
3478 0 : AddTimeData(pfrom.addr, nTimeOffset);
3479 0 : }
3480 :
3481 : // If the peer is old enough to have the old alert system, send it the final alert.
3482 0 : if (greatest_common_version <= 70012) {
3483 0 : const auto finalAlert{ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50")};
3484 0 : m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", Span{finalAlert}));
3485 0 : }
3486 :
3487 :         // Feeler connections exist only to verify if an address is online.
3488 0 : if (pfrom.IsFeelerConn()) {
3489 0 : LogPrint(BCLog::NET, "feeler connection completed peer=%d; disconnecting\n", pfrom.GetId());
3490 0 : pfrom.fDisconnect = true;
3491 0 : }
3492 : return;
3493 0 : }
3494 :
3495 0 : if (pfrom.nVersion == 0) {
3496 : // Must have a version message before anything else
3497 0 : LogPrint(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3498 0 : return;
3499 : }
3500 :
3501 : // At this point, the outgoing message serialization version can't change.
3502 0 : const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3503 :
3504 0 : if (msg_type == NetMsgType::VERACK) {
3505 0 : if (pfrom.fSuccessfullyConnected) {
3506 0 : LogPrint(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId());
3507 0 : return;
3508 : }
3509 :
3510 0 : if (!pfrom.IsInboundConn()) {
3511 0 : const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
3512 0 : LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s%s (%s)\n",
3513 : pfrom.nVersion.load(), peer->m_starting_height,
3514 : pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""),
3515 : (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""),
3516 : pfrom.ConnectionTypeAsString());
3517 0 : }
3518 :
3519 0 : if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
3520 : // Tell our peer we are willing to provide version 2 cmpctblocks.
3521 : // However, we do not request new block announcements using
3522 : // cmpctblock messages.
3523 : // We send this to non-NODE_NETWORK peers as well, because
3524 : // they may wish to request compact blocks from us.
3525 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION));
3526 0 : }
3527 :
3528 0 : if (m_txreconciliation) {
3529 0 : if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) {
3530 : // We could have optimistically pre-registered/registered the peer. In that case,
3531 : // we should forget about the reconciliation state here if this wasn't followed
3532 : // by WTXIDRELAY (since WTXIDRELAY can't be announced later).
3533 0 : m_txreconciliation->ForgetPeer(pfrom.GetId());
3534 0 : }
3535 0 : }
3536 :
3537 0 : if (auto tx_relay = peer->GetTxRelay()) {
3538 : // `TxRelay::m_tx_inventory_to_send` must be empty before the
3539 : // version handshake is completed as
3540 : // `TxRelay::m_next_inv_send_time` is first initialised in
3541 : // `SendMessages` after the verack is received. Any transactions
3542 : // received during the version handshake would otherwise
3543 : // immediately be advertised without random delay, potentially
3544 : // leaking the time of arrival to a spy.
3545 0 : Assume(WITH_LOCK(
3546 : tx_relay->m_tx_inventory_mutex,
3547 : return tx_relay->m_tx_inventory_to_send.empty() &&
3548 : tx_relay->m_next_inv_send_time == 0s));
3549 0 : }
3550 :
3551 0 : pfrom.fSuccessfullyConnected = true;
3552 0 : return;
3553 : }
3554 :
3555 0 : if (msg_type == NetMsgType::SENDHEADERS) {
3556 0 : peer->m_prefers_headers = true;
3557 0 : return;
3558 : }
3559 :
3560 0 : if (msg_type == NetMsgType::SENDCMPCT) {
3561 0 : bool sendcmpct_hb{false};
3562 0 : uint64_t sendcmpct_version{0};
3563 0 : vRecv >> sendcmpct_hb >> sendcmpct_version;
3564 :
3565 : // Only support compact block relay with witnesses
3566 0 : if (sendcmpct_version != CMPCTBLOCKS_VERSION) return;
3567 :
3568 0 : LOCK(cs_main);
3569 0 : CNodeState* nodestate = State(pfrom.GetId());
3570 0 : nodestate->m_provides_cmpctblocks = true;
3571 0 : nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
3572 : // save whether peer selects us as BIP152 high-bandwidth peer
3573 : // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
3574 0 : pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
3575 : return;
3576 0 : }
3577 :
3578 : // BIP339 defines feature negotiation of wtxidrelay, which must happen between
3579 : // VERSION and VERACK to avoid relay problems from switching after a connection is up.
3580 0 : if (msg_type == NetMsgType::WTXIDRELAY) {
3581 0 : if (pfrom.fSuccessfullyConnected) {
3582 : // Disconnect peers that send a wtxidrelay message after VERACK.
3583 0 : LogPrint(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3584 0 : pfrom.fDisconnect = true;
3585 0 : return;
3586 : }
3587 0 : if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) {
3588 0 : if (!peer->m_wtxid_relay) {
3589 0 : peer->m_wtxid_relay = true;
3590 0 : m_wtxid_relay_peers++;
3591 0 : } else {
3592 0 : LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
3593 : }
3594 0 : } else {
3595 0 : LogPrint(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId());
3596 : }
3597 0 : return;
3598 : }
3599 :
3600 : // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen
3601 : // between VERSION and VERACK.
3602 0 : if (msg_type == NetMsgType::SENDADDRV2) {
3603 0 : if (pfrom.fSuccessfullyConnected) {
3604 : // Disconnect peers that send a SENDADDRV2 message after VERACK.
3605 0 : LogPrint(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3606 0 : pfrom.fDisconnect = true;
3607 0 : return;
3608 : }
3609 0 : peer->m_wants_addrv2 = true;
3610 0 : return;
3611 : }
3612 :
3613 : // Received from a peer demonstrating readiness to announce transactions via reconciliations.
3614 : // This feature negotiation must happen between VERSION and VERACK to avoid relay problems
3615 : // from switching announcement protocols after the connection is up.
3616 0 : if (msg_type == NetMsgType::SENDTXRCNCL) {
3617 0 : if (!m_txreconciliation) {
3618 0 : LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId());
3619 0 : return;
3620 : }
3621 :
3622 0 : if (pfrom.fSuccessfullyConnected) {
3623 0 : LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received after verack from peer=%d; disconnecting\n", pfrom.GetId());
3624 0 : pfrom.fDisconnect = true;
3625 0 : return;
3626 : }
3627 :
3628 : // Peer must not offer us reconciliations if we specified no tx relay support in VERSION.
3629 0 : if (RejectIncomingTxs(pfrom)) {
3630 0 : LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d to which we indicated no tx relay; disconnecting\n", pfrom.GetId());
3631 0 : pfrom.fDisconnect = true;
3632 0 : return;
3633 : }
3634 :
3635 : // Peer must not offer us reconciliations if they specified no tx relay support in VERSION.
3636 : // This flag might also be false in other cases, but the RejectIncomingTxs check above
3637 : // eliminates them, so that this flag fully represents what we are looking for.
3638 0 : const auto* tx_relay = peer->GetTxRelay();
3639 0 : if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) {
3640 0 : LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d which indicated no tx relay to us; disconnecting\n", pfrom.GetId());
3641 0 : pfrom.fDisconnect = true;
3642 0 : return;
3643 : }
3644 :
3645 : uint32_t peer_txreconcl_version;
3646 : uint64_t remote_salt;
3647 0 : vRecv >> peer_txreconcl_version >> remote_salt;
3648 :
3649 0 : const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(),
3650 0 : peer_txreconcl_version, remote_salt);
3651 0 : switch (result) {
3652 : case ReconciliationRegisterResult::NOT_FOUND:
3653 0 : LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId());
3654 0 : break;
3655 : case ReconciliationRegisterResult::SUCCESS:
3656 0 : break;
3657 : case ReconciliationRegisterResult::ALREADY_REGISTERED:
3658 0 : LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d (sendtxrcncl received from already registered peer); disconnecting\n", pfrom.GetId());
3659 0 : pfrom.fDisconnect = true;
3660 0 : return;
3661 : case ReconciliationRegisterResult::PROTOCOL_VIOLATION:
3662 0 : LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d; disconnecting\n", pfrom.GetId());
3663 0 : pfrom.fDisconnect = true;
3664 0 : return;
3665 : }
3666 0 : return;
3667 : }
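// sendtxrcncl above, like wtxidrelay and sendaddrv2, may only be negotiated
// between VERSION and VERACK; receiving it after the handshake has completed
// is treated as a protocol violation. A minimal sketch of that gate
// (hypothetical names, illustration only, not part of this file):
//
//     enum class NegotiationResult { Accept, DisconnectAfterVerack };
//
//     NegotiationResult HandleFeatureMsg(bool successfully_connected /* verack already received */)
//     {
//         // Optional-feature messages are only valid during the handshake.
//         if (successfully_connected) return NegotiationResult::DisconnectAfterVerack;
//         return NegotiationResult::Accept;
//     }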
3668 :
3669 0 : if (!pfrom.fSuccessfullyConnected) {
3670 0 : LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
3671 0 : return;
3672 : }
3673 :
3674 0 : if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
3675 0 : const auto ser_params{
3676 0 : msg_type == NetMsgType::ADDRV2 ?
3677 : // Set V2 param so that the CNetAddr and CAddress
3678 : // unserialize methods know that an address in v2 format is coming.
3679 : CAddress::V2_NETWORK :
3680 : CAddress::V1_NETWORK,
3681 : };
3682 :
3683 0 : std::vector<CAddress> vAddr;
3684 :
3685 0 : vRecv >> WithParams(ser_params, vAddr);
3686 :
3687 0 : if (!SetupAddressRelay(pfrom, *peer)) {
3688 0 : LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
3689 0 : return;
3690 : }
3691 :
3692 0 : if (vAddr.size() > MAX_ADDR_TO_SEND)
3693 : {
3694 0 : Misbehaving(*peer, 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
3695 0 : return;
3696 : }
3697 :
3698 : // Store the new addresses
3699 0 : std::vector<CAddress> vAddrOk;
3700 0 : const auto current_a_time{Now<NodeSeconds>()};
3701 :
3702 : // Update/increment addr rate limiting bucket.
3703 0 : const auto current_time{GetTime<std::chrono::microseconds>()};
3704 0 : if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
3705 : // Don't increment bucket if it's already full
3706 0 : const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
3707 0 : const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND;
3708 0 : peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
3709 0 : }
3710 0 : peer->m_addr_token_timestamp = current_time;
3711 :
3712 0 : const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr);
3713 0 : uint64_t num_proc = 0;
3714 0 : uint64_t num_rate_limit = 0;
3715 0 : Shuffle(vAddr.begin(), vAddr.end(), FastRandomContext());
3716 0 : for (CAddress& addr : vAddr)
3717 : {
3718 0 : if (interruptMsgProc)
3719 0 : return;
3720 :
3721 : // Apply rate limiting.
3722 0 : if (peer->m_addr_token_bucket < 1.0) {
3723 0 : if (rate_limited) {
3724 0 : ++num_rate_limit;
3725 0 : continue;
3726 : }
3727 0 : } else {
3728 0 : peer->m_addr_token_bucket -= 1.0;
3729 : }
3730 : // We only bother storing full nodes, though this may include
3731 : // things which we would not make an outbound connection to, in
3732 : // part because we may make feeler connections to them.
3733 0 : if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices))
3734 0 : continue;
3735 :
3736 0 : if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) {
3737 0 : addr.nTime = current_a_time - 5 * 24h;
3738 0 : }
3739 0 : AddAddressKnown(*peer, addr);
3740 0 : if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
3741 : // Do not process banned/discouraged addresses beyond remembering we received them
3742 0 : continue;
3743 : }
3744 0 : ++num_proc;
3745 0 : bool fReachable = IsReachable(addr);
3746 0 : if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) {
3747 : // Relay to a limited number of other nodes
3748 0 : RelayAddress(pfrom.GetId(), addr, fReachable);
3749 0 : }
3750 : // Do not store addresses outside our network
3751 0 : if (fReachable)
3752 0 : vAddrOk.push_back(addr);
3753 : }
3754 0 : peer->m_addr_processed += num_proc;
3755 0 : peer->m_addr_rate_limited += num_rate_limit;
3756 0 : LogPrint(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n",
3757 : vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
3758 :
3759 0 : m_addrman.Add(vAddrOk, pfrom.addr, 2h);
3760 0 : if (vAddr.size() < 1000) peer->m_getaddr_sent = false;
3761 :
3762 : // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements
3763 0 : if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
3764 0 : LogPrint(BCLog::NET, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId());
3765 0 : pfrom.fDisconnect = true;
3766 0 : }
3767 0 : return;
3768 0 : }
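// The addr processing above rate-limits peers with a token bucket: the bucket
// refills at MAX_ADDR_RATE_PER_SECOND up to MAX_ADDR_PROCESSING_TOKEN_BUCKET,
// and each processed address spends one token. A minimal standalone sketch of
// the same idea (hypothetical names, illustration only, not part of this file):
//
//     #include <algorithm>
//     #include <chrono>
//
//     struct AddrTokenBucket {
//         double tokens{0.0};
//         double capacity;          // analogue of MAX_ADDR_PROCESSING_TOKEN_BUCKET
//         double rate_per_second;   // analogue of MAX_ADDR_RATE_PER_SECOND
//         std::chrono::steady_clock::time_point last{std::chrono::steady_clock::now()};
//
//         AddrTokenBucket(double cap, double rate) : capacity{cap}, rate_per_second{rate} {}
//
//         // Refill from elapsed time, then try to spend one token.
//         bool Consume()
//         {
//             const auto now = std::chrono::steady_clock::now();
//             const double elapsed = std::chrono::duration<double>(now - last).count();
//             last = now;
//             tokens = std::min(capacity, tokens + elapsed * rate_per_second);
//             if (tokens < 1.0) return false; // rate limited
//             tokens -= 1.0;
//             return true;
//         }
//     };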
3769 :
3770 0 : if (msg_type == NetMsgType::INV) {
3771 0 : std::vector<CInv> vInv;
3772 0 : vRecv >> vInv;
3773 0 : if (vInv.size() > MAX_INV_SZ)
3774 : {
3775 0 : Misbehaving(*peer, 20, strprintf("inv message size = %u", vInv.size()));
3776 0 : return;
3777 : }
3778 :
3779 0 : const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
3780 :
3781 0 : LOCK(cs_main);
3782 :
3783 0 : const auto current_time{GetTime<std::chrono::microseconds>()};
3784 0 : uint256* best_block{nullptr};
3785 :
3786 0 : for (CInv& inv : vInv) {
3787 0 : if (interruptMsgProc) return;
3788 :
3789 : // Ignore INVs that don't match wtxidrelay setting.
3790 : // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting.
3791 : // This is fine as no INV messages are involved in that process.
3792 0 : if (peer->m_wtxid_relay) {
3793 0 : if (inv.IsMsgTx()) continue;
3794 0 : } else {
3795 0 : if (inv.IsMsgWtx()) continue;
3796 : }
3797 :
3798 0 : if (inv.IsMsgBlk()) {
3799 0 : const bool fAlreadyHave = AlreadyHaveBlock(inv.hash);
3800 0 : LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
3801 :
3802 0 : UpdateBlockAvailability(pfrom.GetId(), inv.hash);
3803 0 : if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) {
3804 : // Headers-first is the primary method of announcement on
3805 : // the network. If a node fell back to sending blocks by
3806 : // inv, it may be for a re-org, or because we haven't
3807 : // completed initial headers sync. The final block hash
3808 : // provided should be the highest, so send a getheaders and
3809 : // then fetch the blocks we need to catch up.
3810 0 : best_block = &inv.hash;
3811 0 : }
3812 0 : } else if (inv.IsGenTxMsg()) {
3813 0 : if (reject_tx_invs) {
3814 0 : LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
3815 0 : pfrom.fDisconnect = true;
3816 0 : return;
3817 : }
3818 0 : const GenTxid gtxid = ToGenTxid(inv);
3819 0 : const bool fAlreadyHave = AlreadyHaveTx(gtxid);
3820 0 : LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
3821 :
3822 0 : AddKnownTx(*peer, inv.hash);
3823 0 : if (!fAlreadyHave && !m_chainman.IsInitialBlockDownload()) {
3824 0 : AddTxAnnouncement(pfrom, gtxid, current_time);
3825 0 : }
3826 0 : } else {
3827 0 : LogPrint(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId());
3828 : }
3829 : }
3830 :
3831 0 : if (best_block != nullptr) {
3832 : // If we haven't started initial headers-sync with this peer, then
3833 : // consider sending a getheaders now. On initial startup, there's a
3834 : // reliability vs bandwidth tradeoff, where we are only trying to do
3835 : // initial headers sync with one peer at a time, with a long
3836 : // timeout (at which point, if the sync hasn't completed, we will
3837 : // disconnect the peer and then choose another). In the meantime,
3838 : // as new blocks are found, we are willing to add one new peer per
3839 : // block to sync with as well, to sync quicker in the case where
3840 : // our initial peer is unresponsive (but less bandwidth than we'd
3841 : // use if we turned on sync with all peers).
3842 0 : CNodeState& state{*Assert(State(pfrom.GetId()))};
3843 0 : if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) {
3844 0 : if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
3845 0 : LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
3846 : m_chainman.m_best_header->nHeight, best_block->ToString(),
3847 : pfrom.GetId());
3848 0 : }
3849 0 : if (!state.fSyncStarted) {
3850 0 : peer->m_inv_triggered_getheaders_before_sync = true;
3851 : // Update the last block hash that triggered a new headers
3852 : // sync, so that we don't turn on headers sync with more
3853 : // than 1 new peer every new block.
3854 0 : m_last_block_inv_triggering_headers_sync = *best_block;
3855 0 : }
3856 0 : }
3857 0 : }
3858 :
3859 0 : return;
3860 0 : }
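// The best_block handling above adds at most one extra headers-sync peer per
// newly announced block: a peer may trigger a getheaders-based sync only if it
// has not done so before and the announced hash differs from the last hash
// that started a sync. A hedged standalone sketch of that gate (hypothetical
// names, illustration only):
//
//     #include <string>
//
//     struct HeadersSyncGate {
//         std::string last_trigger_hash; // analogue of m_last_block_inv_triggering_headers_sync
//
//         // peer_triggered_before is the per-peer analogue of
//         // m_inv_triggered_getheaders_before_sync.
//         bool MayStartSync(bool peer_triggered_before, const std::string& announced_hash)
//         {
//             if (peer_triggered_before || announced_hash == last_trigger_hash) return false;
//             last_trigger_hash = announced_hash;
//             return true;
//         }
//     };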
3861 :
3862 0 : if (msg_type == NetMsgType::GETDATA) {
3863 0 : std::vector<CInv> vInv;
3864 0 : vRecv >> vInv;
3865 0 : if (vInv.size() > MAX_INV_SZ)
3866 : {
3867 0 : Misbehaving(*peer, 20, strprintf("getdata message size = %u", vInv.size()));
3868 0 : return;
3869 : }
3870 :
3871 0 : LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId());
3872 :
3873 0 : if (vInv.size() > 0) {
3874 0 : LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId());
3875 0 : }
3876 :
3877 : {
3878 0 : LOCK(peer->m_getdata_requests_mutex);
3879 0 : peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
3880 0 : ProcessGetData(pfrom, *peer, interruptMsgProc);
3881 0 : }
3882 :
3883 0 : return;
3884 0 : }
3885 :
3886 0 : if (msg_type == NetMsgType::GETBLOCKS) {
3887 0 : CBlockLocator locator;
3888 0 : uint256 hashStop;
3889 0 : vRecv >> locator >> hashStop;
3890 :
3891 0 : if (locator.vHave.size() > MAX_LOCATOR_SZ) {
3892 0 : LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
3893 0 : pfrom.fDisconnect = true;
3894 0 : return;
3895 : }
3896 :
3897 : // We might have announced the currently-being-connected tip using a
3898 : // compact block, which resulted in the peer sending a getblocks
3899 : // request, which we would otherwise respond to without the new block.
3900 : // To avoid this situation we simply verify that we are on our best
3901 : // known chain now. This is super overkill, but we handle it better
3902 : // for getheaders requests, and there are no known nodes which support
3903 : // compact blocks but still use getblocks to request blocks.
3904 : {
3905 0 : std::shared_ptr<const CBlock> a_recent_block;
3906 : {
3907 0 : LOCK(m_most_recent_block_mutex);
3908 0 : a_recent_block = m_most_recent_block;
3909 0 : }
3910 0 : BlockValidationState state;
3911 0 : if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) {
3912 0 : LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString());
3913 0 : }
3914 0 : }
3915 :
3916 0 : LOCK(cs_main);
3917 :
3918 : // Find the last block the caller has in the main chain
3919 0 : const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
3920 :
3921 : // Send the rest of the chain
3922 0 : if (pindex)
3923 0 : pindex = m_chainman.ActiveChain().Next(pindex);
3924 0 : int nLimit = 500;
3925 0 : LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId());
3926 0 : for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
3927 : {
3928 0 : if (pindex->GetBlockHash() == hashStop)
3929 : {
3930 0 : LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
3931 0 : break;
3932 : }
3933 : // If pruning, don't inv blocks unless we have them on disk and are likely to still
3934 : // have them for some reasonable time window (1 hour) that block relay might require.
3935 0 : const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
3936 0 : if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
3937 0 : LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
3938 0 : break;
3939 : }
3940 0 : WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
3941 0 : if (--nLimit <= 0) {
3942 : // When this block is requested, we'll send an inv that'll
3943 : // trigger the peer to getblocks the next batch of inventory.
3944 0 : LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
3945 0 : WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();});
3946 0 : break;
3947 : }
3948 0 : }
3949 : return;
3950 0 : }
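// The getblocks logic above serves at most 500 block hashes per request and,
// when the limit is hit, remembers a "continuation" hash so that the next
// getdata for that block triggers another inv batch. A simplified standalone
// sketch of that batching pattern (hypothetical names, illustration only):
//
//     #include <optional>
//     #include <string>
//     #include <vector>
//
//     struct InvBatch {
//         std::vector<std::string> to_announce;    // hashes to inv now
//         std::optional<std::string> continuation; // set when the limit was reached
//     };
//
//     InvBatch BuildBatch(const std::vector<std::string>& chain_after_fork, size_t limit = 500)
//     {
//         InvBatch batch;
//         for (const auto& hash : chain_after_fork) {
//             batch.to_announce.push_back(hash);
//             if (batch.to_announce.size() >= limit) {
//                 batch.continuation = hash; // next request resumes from here
//                 break;
//             }
//         }
//         return batch;
//     }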
3951 :
3952 0 : if (msg_type == NetMsgType::GETBLOCKTXN) {
3953 0 : BlockTransactionsRequest req;
3954 0 : vRecv >> req;
3955 :
3956 0 : std::shared_ptr<const CBlock> recent_block;
3957 : {
3958 0 : LOCK(m_most_recent_block_mutex);
3959 0 : if (m_most_recent_block_hash == req.blockhash)
3960 0 : recent_block = m_most_recent_block;
3961 : // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
3962 0 : }
3963 0 : if (recent_block) {
3964 0 : SendBlockTransactions(pfrom, *peer, *recent_block, req);
3965 0 : return;
3966 : }
3967 :
3968 : {
3969 0 : LOCK(cs_main);
3970 :
3971 0 : const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
3972 0 : if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) {
3973 0 : LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId());
3974 0 : return;
3975 : }
3976 :
3977 0 : if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
3978 0 : CBlock block;
3979 0 : const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pindex)};
3980 0 : assert(ret);
3981 :
3982 0 : SendBlockTransactions(pfrom, *peer, block, req);
3983 : return;
3984 0 : }
3985 0 : }
3986 :
3987 : // If an older block is requested (should never happen in practice,
3988 : // but can happen in tests) send a block response instead of a
3989 : // blocktxn response. Sending a full block response instead of a
3990 : // small blocktxn response is preferable in the case where a peer
3991 : // might maliciously send lots of getblocktxn requests to trigger
3992 : // expensive disk reads, because it will require the peer to
3993 : // actually receive all the data read from disk over the network.
3994 0 : LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
3995 0 : CInv inv{MSG_WITNESS_BLOCK, req.blockhash};
3996 0 : WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
3997 : // The message processing loop will go around again (without pausing) and we'll respond then
3998 0 : return;
3999 0 : }
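// The getblocktxn path above answers with a small blocktxn response only for
// recent blocks (within MAX_BLOCKTXN_DEPTH of the tip) and falls back to
// queueing a full block for anything deeper, so a peer cannot cheaply trigger
// large disk reads without paying the bandwidth for the full block. A hedged
// sketch of that decision (hypothetical names, illustration only):
//
//     enum class BlockTxnReply { SendBlockTxn, SendFullBlock };
//
//     BlockTxnReply ChooseReply(int block_height, int tip_height, int max_depth)
//     {
//         // Recent enough: the cheap, partial response is fine.
//         if (block_height >= tip_height - max_depth) return BlockTxnReply::SendBlockTxn;
//         // Otherwise make the requester download the whole block.
//         return BlockTxnReply::SendFullBlock;
//     }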
4000 :
4001 0 : if (msg_type == NetMsgType::GETHEADERS) {
4002 0 : CBlockLocator locator;
4003 0 : uint256 hashStop;
4004 0 : vRecv >> locator >> hashStop;
4005 :
4006 0 : if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4007 0 : LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
4008 0 : pfrom.fDisconnect = true;
4009 0 : return;
4010 : }
4011 :
4012 0 : if (m_chainman.m_blockman.LoadingBlocks()) {
4013 0 : LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId());
4014 0 : return;
4015 : }
4016 :
4017 0 : LOCK(cs_main);
4018 :
4019 : // Note that if we were to be on a chain that forks from the checkpointed
4020 : // chain, then serving those headers to a peer that has seen the
4021 : // checkpointed chain would cause that peer to disconnect us. Requiring
4022 : // that our chainwork exceed the minimum chain work is a protection against
4023 : // being fed a bogus chain when we started up for the first time and
4024 : // getting partitioned off the honest network for serving that chain to
4025 : // others.
4026 0 : if (m_chainman.ActiveTip() == nullptr ||
4027 0 : (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) {
4028 0 : LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId());
4029 : // Just respond with an empty headers message, to tell the peer to
4030 : // go away but not treat us as unresponsive.
4031 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, std::vector<CBlock>()));
4032 0 : return;
4033 : }
4034 :
4035 0 : CNodeState *nodestate = State(pfrom.GetId());
4036 0 : const CBlockIndex* pindex = nullptr;
4037 0 : if (locator.IsNull())
4038 : {
4039 : // If locator is null, return the hashStop block
4040 0 : pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
4041 0 : if (!pindex) {
4042 0 : return;
4043 : }
4044 :
4045 0 : if (!BlockRequestAllowed(pindex)) {
4046 0 : LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId());
4047 0 : return;
4048 : }
4049 0 : }
4050 : else
4051 : {
4052 : // Find the last block the caller has in the main chain
4053 0 : pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4054 0 : if (pindex)
4055 0 : pindex = m_chainman.ActiveChain().Next(pindex);
4056 : }
4057 :
4058 : // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
4059 0 : std::vector<CBlock> vHeaders;
4060 0 : int nLimit = MAX_HEADERS_RESULTS;
4061 0 : LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId());
4062 0 : for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex))
4063 : {
4064 0 : vHeaders.push_back(pindex->GetBlockHeader());
4065 0 : if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
4066 0 : break;
4067 0 : }
4068 : // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR
4069 : // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty
4070 : // headers message). In both cases it's safe to update
4071 : // pindexBestHeaderSent to be our tip.
4072 : //
4073 : // It is important that we simply reset the BestHeaderSent value here,
4074 : // and not max(BestHeaderSent, newHeaderSent). We might have announced
4075 : // the currently-being-connected tip using a compact block, which
4076 : // resulted in the peer sending a headers request, which we respond to
4077 : // without the new block. By resetting the BestHeaderSent, we ensure we
4078 : // will re-announce the new block via headers (or compact blocks again)
4079 : // in the SendMessages logic.
4080 0 : nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip();
4081 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
4082 : return;
4083 0 : }
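// The header walk above starts at the first block after the fork point found
// from the locator and collects at most MAX_HEADERS_RESULTS headers, stopping
// early at hashStop. A standalone sketch of that loop shape (hypothetical
// names, illustration only):
//
//     #include <string>
//     #include <vector>
//
//     std::vector<std::string> CollectHeaders(const std::vector<std::string>& chain,
//                                             size_t start_index,
//                                             const std::string& hash_stop,
//                                             size_t max_results)
//     {
//         std::vector<std::string> result;
//         for (size_t i = start_index; i < chain.size(); ++i) {
//             result.push_back(chain[i]);
//             // Stop once the limit is hit or the requested stop hash was included.
//             if (result.size() >= max_results || chain[i] == hash_stop) break;
//         }
//         return result;
//     }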
4084 :
4085 0 : if (msg_type == NetMsgType::TX) {
4086 0 : if (RejectIncomingTxs(pfrom)) {
4087 0 : LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
4088 0 : pfrom.fDisconnect = true;
4089 0 : return;
4090 : }
4091 :
4092 : // Stop processing the transaction early if we are still in IBD since we don't
4093 : // have enough information to validate it yet. Sending unsolicited transactions
4094 : // is not considered a protocol violation, so don't punish the peer.
4095 0 : if (m_chainman.IsInitialBlockDownload()) return;
4096 :
4097 0 : CTransactionRef ptx;
4098 0 : vRecv >> ptx;
4099 0 : const CTransaction& tx = *ptx;
4100 :
4101 0 : const uint256& txid = ptx->GetHash();
4102 0 : const uint256& wtxid = ptx->GetWitnessHash();
4103 :
4104 0 : const uint256& hash = peer->m_wtxid_relay ? wtxid : txid;
4105 0 : AddKnownTx(*peer, hash);
4106 :
4107 0 : LOCK(cs_main);
4108 :
4109 0 : m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
4110 0 : if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid);
4111 :
4112 : // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the
4113 : // absence of witness malleation, this is strictly better, because the
4114 : // recent rejects filter may contain the wtxid but rarely contains
4115 : // the txid of a segwit transaction that has been rejected.
4116 : // In the presence of witness malleation, it's possible that by only
4117 : // doing the check with wtxid, we could overlook a transaction which
4118 : // was confirmed with a different witness, or exists in our mempool
4119 : // with a different witness, but this has limited downside:
4120 : // mempool validation does its own lookup of whether we have the txid
4121 : // already; and an adversary can already relay us old transactions
4122 : // (older than our recency filter) if trying to DoS us, without any need
4123 : // for witness malleation.
4124 0 : if (AlreadyHaveTx(GenTxid::Wtxid(wtxid))) {
4125 0 : if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) {
4126 : // Always relay transactions received from peers with forcerelay
4127 : // permission, even if they were already in the mempool, allowing
4128 : // the node to function as a gateway for nodes hidden behind it.
4129 0 : if (!m_mempool.exists(GenTxid::Txid(tx.GetHash()))) {
4130 0 : LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n",
4131 : tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4132 0 : } else {
4133 0 : LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n",
4134 : tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId());
4135 0 : RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
4136 : }
4137 0 : }
4138 : // If a tx is detected by m_recent_rejects it is ignored. Because we haven't
4139 : // submitted the tx to our mempool, we won't have computed a DoS
4140 : // score for it or determined exactly why we consider it invalid.
4141 : //
4142 : // This means we won't penalize any peer subsequently relaying a DoSy
4143 : // tx (even if we penalized the first peer who gave it to us) because
4144 : // we have to account for m_recent_rejects showing false positives. In
4145 : // other words, we shouldn't penalize a peer if we aren't *sure* they
4146 : // submitted a DoSy tx.
4147 : //
4148 : // Note that m_recent_rejects doesn't just record DoSy or invalid
4149 : // transactions, but any tx not accepted by the mempool, which may be
4150 : // due to node policy (vs. consensus). So we can't blanket penalize a
4151 : // peer simply for relaying a tx that our m_recent_rejects has caught,
4152 : // regardless of false positives.
4153 0 : return;
4154 : }
4155 :
4156 0 : const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
4157 0 : const TxValidationState& state = result.m_state;
4158 :
4159 0 : if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
4160 : // As this version of the transaction was acceptable, we can forget about any
4161 : // requests for it.
4162 0 : m_txrequest.ForgetTxHash(tx.GetHash());
4163 0 : m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4164 0 : RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
4165 0 : m_orphanage.AddChildrenToWorkSet(tx);
4166 :
4167 0 : pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
4168 :
4169 0 : LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n",
4170 : pfrom.GetId(),
4171 : tx.GetHash().ToString(),
4172 : tx.GetWitnessHash().ToString(),
4173 : m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
4174 :
4175 0 : for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
4176 0 : AddToCompactExtraTransactions(removedTx);
4177 : }
4178 0 : }
4179 0 : else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
4180 : {
4181 0 : bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected
4182 :
4183 : // Deduplicate parent txids, so that we don't have to loop over
4184 : // the same parent txid more than once down below.
4185 0 : std::vector<uint256> unique_parents;
4186 0 : unique_parents.reserve(tx.vin.size());
4187 0 : for (const CTxIn& txin : tx.vin) {
4188 : // We start with all parents, and then remove duplicates below.
4189 0 : unique_parents.push_back(txin.prevout.hash);
4190 : }
4191 0 : std::sort(unique_parents.begin(), unique_parents.end());
4192 0 : unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
4193 0 : for (const uint256& parent_txid : unique_parents) {
4194 0 : if (m_recent_rejects.contains(parent_txid)) {
4195 0 : fRejectedParents = true;
4196 0 : break;
4197 : }
4198 : }
4199 0 : if (!fRejectedParents) {
4200 0 : const auto current_time{GetTime<std::chrono::microseconds>()};
4201 :
4202 0 : for (const uint256& parent_txid : unique_parents) {
4203 : // Here, we only have the txid (and not wtxid) of the
4204 : // inputs, so we only request in txid mode, even for
4205 : // wtxidrelay peers.
4206 : // Eventually we should replace this with an improved
4207 : // protocol for getting all unconfirmed parents.
4208 0 : const auto gtxid{GenTxid::Txid(parent_txid)};
4209 0 : AddKnownTx(*peer, parent_txid);
4210 0 : if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time);
4211 : }
4212 :
4213 0 : if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
4214 0 : AddToCompactExtraTransactions(ptx);
4215 0 : }
4216 :
4217 : // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore.
4218 0 : m_txrequest.ForgetTxHash(tx.GetHash());
4219 0 : m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4220 :
4221 : // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
4222 0 : m_orphanage.LimitOrphans(m_opts.max_orphan_txs);
4223 0 : } else {
4224 0 : LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s (wtxid=%s)\n",
4225 : tx.GetHash().ToString(),
4226 : tx.GetWitnessHash().ToString());
4227 : // We will continue to reject this tx since it has rejected
4228 : // parents so avoid re-requesting it from other peers.
4229 : // Here we add both the txid and the wtxid, as we know that
4230 : // regardless of what witness is provided, we will not accept
4231 : // this, so we don't need to allow for redownload of this txid
4232 : // from any of our non-wtxidrelay peers.
4233 0 : m_recent_rejects.insert(tx.GetHash());
4234 0 : m_recent_rejects.insert(tx.GetWitnessHash());
4235 0 : m_txrequest.ForgetTxHash(tx.GetHash());
4236 0 : m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4237 : }
4238 0 : } else {
4239 0 : if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
4240 : // We can add the wtxid of this transaction to our reject filter.
4241 : // Do not add txids of witness transactions or witness-stripped
4242 : // transactions to the filter, as they can have been malleated;
4243 : // adding such txids to the reject filter would potentially
4244 : // interfere with relay of valid transactions from peers that
4245 : // do not support wtxid-based relay. See
4246 : // https://github.com/bitcoin/bitcoin/issues/8279 for details.
4247 : // We can remove this restriction (and always add wtxids to
4248 : // the filter even for witness stripped transactions) once
4249 : // wtxid-based relay is broadly deployed.
4250 : // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
4251 : // for concerns around weakening security of unupgraded nodes
4252 : // if we start doing this too early.
4253 0 : m_recent_rejects.insert(tx.GetWitnessHash());
4254 0 : m_txrequest.ForgetTxHash(tx.GetWitnessHash());
4255 : // If the transaction failed for TX_INPUTS_NOT_STANDARD,
4256 : // then we know that the witness was irrelevant to the policy
4257 : // failure, since this check depends only on the txid
4258 : // (the scriptPubKey being spent is covered by the txid).
4259 : // Add the txid to the reject filter to prevent repeated
4260 : // processing of this transaction in the event that child
4261 : // transactions are later received (resulting in
4262 : // parent-fetching by txid via the orphan-handling logic).
4263 0 : if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
4264 0 : m_recent_rejects.insert(tx.GetHash());
4265 0 : m_txrequest.ForgetTxHash(tx.GetHash());
4266 0 : }
4267 0 : if (RecursiveDynamicUsage(*ptx) < 100000) {
4268 0 : AddToCompactExtraTransactions(ptx);
4269 0 : }
4270 0 : }
4271 : }
4272 :
4273 0 : if (state.IsInvalid()) {
4274 0 : LogPrint(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n",
4275 : tx.GetHash().ToString(),
4276 : tx.GetWitnessHash().ToString(),
4277 : pfrom.GetId(),
4278 : state.ToString());
4279 0 : MaybePunishNodeForTx(pfrom.GetId(), state);
4280 0 : }
4281 : return;
4282 0 : }
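// When a transaction is missing inputs, the handler above deduplicates the
// parent txids with the usual sort-then-unique idiom before deciding whether
// to request them or reject the orphan. The same idiom in isolation
// (hypothetical names, illustration only):
//
//     #include <algorithm>
//     #include <string>
//     #include <vector>
//
//     std::vector<std::string> UniqueParents(const std::vector<std::string>& prevout_txids)
//     {
//         std::vector<std::string> unique_parents{prevout_txids};
//         std::sort(unique_parents.begin(), unique_parents.end());
//         unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()),
//                              unique_parents.end());
//         return unique_parents;
//     }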
4283 :
4284 0 : if (msg_type == NetMsgType::CMPCTBLOCK)
4285 : {
4286 : // Ignore cmpctblock received while importing
4287 0 : if (m_chainman.m_blockman.LoadingBlocks()) {
4288 0 : LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId());
4289 0 : return;
4290 : }
4291 :
4292 0 : CBlockHeaderAndShortTxIDs cmpctblock;
4293 0 : vRecv >> cmpctblock;
4294 :
4295 0 : bool received_new_header = false;
4296 0 : const auto blockhash = cmpctblock.header.GetHash();
4297 :
4298 : {
4299 0 : LOCK(cs_main);
4300 :
4301 0 : const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock);
4302 0 : if (!prev_block) {
4303 : // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
4304 0 : if (!m_chainman.IsInitialBlockDownload()) {
4305 0 : MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer);
4306 0 : }
4307 0 : return;
4308 0 : } else if (prev_block->nChainWork + CalculateHeadersWork({cmpctblock.header}) < GetAntiDoSWorkThreshold()) {
4309 : // If we get a low-work header in a compact block, we can ignore it.
4310 0 : LogPrint(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId());
4311 0 : return;
4312 : }
4313 :
4314 0 : if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
4315 0 : received_new_header = true;
4316 0 : }
4317 0 : }
4318 :
4319 0 : const CBlockIndex *pindex = nullptr;
4320 0 : BlockValidationState state;
4321 0 : if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, /*min_pow_checked=*/true, state, &pindex)) {
4322 0 : if (state.IsInvalid()) {
4323 0 : MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
4324 0 : return;
4325 : }
4326 0 : }
4327 :
4328 0 : if (received_new_header) {
4329 0 : LogPrintfCategory(BCLog::NET, "Saw new cmpctblock header hash=%s peer=%d\n",
4330 : blockhash.ToString(), pfrom.GetId());
4331 0 : }
4332 :
4333 0 : bool fProcessBLOCKTXN = false;
4334 :
4335 : // If we end up treating this as a plain headers message, call that as well
4336 : // without cs_main.
4337 0 : bool fRevertToHeaderProcessing = false;
4338 :
4339 : // Keep a CBlock for "optimistic" compactblock reconstructions (see
4340 : // below)
4341 0 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4342 0 : bool fBlockReconstructed = false;
4343 :
4344 : {
4345 0 : LOCK(cs_main);
4346 : // If AcceptBlockHeader returned true, it set pindex
4347 0 : assert(pindex);
4348 0 : UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
4349 :
4350 0 : CNodeState *nodestate = State(pfrom.GetId());
4351 :
4352 : // If this was a new header with more work than our tip, update the
4353 : // peer's last block announcement time
4354 0 : if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
4355 0 : nodestate->m_last_block_announcement = GetTime();
4356 0 : }
4357 :
4358 0 : if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
4359 0 : return;
4360 :
4361 0 : auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
4362 0 : size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
4363 0 : bool requested_block_from_this_peer{false};
4364 :
4365 : // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
4366 0 : bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
4367 :
4368 0 : while (range_flight.first != range_flight.second) {
4369 0 : if (range_flight.first->second.first == pfrom.GetId()) {
4370 0 : requested_block_from_this_peer = true;
4371 0 : break;
4372 : }
4373 0 : range_flight.first++;
4374 : }
4375 :
4376 0 : if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
4377 0 : pindex->nTx != 0) { // We had this block at some point, but pruned it
4378 0 : if (requested_block_from_this_peer) {
4379 : // We requested this block for some reason, but our mempool will probably be useless
4380 : // so we just grab the block via normal getdata
4381 0 : std::vector<CInv> vInv(1);
4382 0 : vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4383 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4384 0 : }
4385 0 : return;
4386 : }
4387 :
4388 : // If we're not close to tip yet, give up and let parallel block fetch work its magic
4389 0 : if (!already_in_flight && !CanDirectFetch()) {
4390 0 : return;
4391 : }
4392 :
4393 : // We want to be a bit conservative just to be extra careful about DoS
4394 : // possibilities in compact block processing...
4395 0 : if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
4396 0 : if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
4397 0 : requested_block_from_this_peer) {
4398 0 : std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
4399 0 : if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
4400 0 : if (!(*queuedBlockIt)->partialBlock)
4401 0 : (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
4402 : else {
4403 : // The block was already in flight using compact blocks from the same peer
4404 0 : LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
4405 0 : return;
4406 : }
4407 0 : }
4408 :
4409 0 : PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
4410 0 : ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
4411 0 : if (status == READ_STATUS_INVALID) {
4412 0 : RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
4413 0 : Misbehaving(*peer, 100, "invalid compact block");
4414 0 : return;
4415 0 : } else if (status == READ_STATUS_FAILED) {
4416 0 : if (first_in_flight) {
4417 : // Duplicate txindexes, the block is now in-flight, so just request it
4418 0 : std::vector<CInv> vInv(1);
4419 0 : vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4420 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4421 0 : } else {
4422 : // Give up for this peer and wait for other peer(s)
4423 0 : RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4424 : }
4425 0 : return;
4426 : }
4427 :
4428 0 : BlockTransactionsRequest req;
4429 0 : for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
4430 0 : if (!partialBlock.IsTxAvailable(i))
4431 0 : req.indexes.push_back(i);
4432 0 : }
4433 0 : if (req.indexes.empty()) {
4434 0 : fProcessBLOCKTXN = true;
4435 0 : } else if (first_in_flight) {
4436 : // We will try to round-trip any compact blocks we get on failure,
4437 : // as long as it's first...
4438 0 : req.blockhash = pindex->GetBlockHash();
4439 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
4440 0 : } else if (pfrom.m_bip152_highbandwidth_to &&
4441 0 : (!pfrom.IsInboundConn() ||
4442 0 : IsBlockRequestedFromOutbound(blockhash) ||
4443 0 : already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
4444 : // ... or it's a hb relay peer and:
4445 : // - peer is outbound, or
4446 : // - we already have an outbound attempt in flight (so we'll take what we can get), or
4447 : // - it's not the final parallel download slot (which we may reserve for first outbound)
4448 0 : req.blockhash = pindex->GetBlockHash();
4449 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
4450 0 : } else {
4451 : // Give up for this peer and wait for other peer(s)
4452 0 : RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
4453 : }
4454 0 : } else {
4455 : // This block is either already in flight from a different
4456 : // peer, or this peer has too many blocks outstanding to
4457 : // download from.
4458 : // Optimistically try to reconstruct anyway since we might be
4459 : // able to without any round trips.
4460 0 : PartiallyDownloadedBlock tempBlock(&m_mempool);
4461 0 : ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
4462 0 : if (status != READ_STATUS_OK) {
4463 : // TODO: don't ignore failures
4464 0 : return;
4465 : }
4466 0 : std::vector<CTransactionRef> dummy;
4467 0 : status = tempBlock.FillBlock(*pblock, dummy);
4468 0 : if (status == READ_STATUS_OK) {
4469 0 : fBlockReconstructed = true;
4470 0 : }
4471 0 : }
4472 0 : } else {
4473 0 : if (requested_block_from_this_peer) {
4474 : // We requested this block, but it's far into the future, so our
4475 : // mempool will probably be useless - request the block normally
4476 0 : std::vector<CInv> vInv(1);
4477 0 : vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
4478 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4479 : return;
4480 0 : } else {
4481 : // If this was an announce-cmpctblock, we want the same treatment as a header message
4482 0 : fRevertToHeaderProcessing = true;
4483 : }
4484 : }
4485 0 : } // cs_main
4486 :
4487 0 : if (fProcessBLOCKTXN) {
4488 0 : BlockTransactions txn;
4489 0 : txn.blockhash = blockhash;
4490 0 : return ProcessCompactBlockTxns(pfrom, *peer, txn);
4491 0 : }
4492 :
4493 0 : if (fRevertToHeaderProcessing) {
4494 : // Headers received from HB compact block peers are permitted to be
4495 : // relayed before full validation (see BIP 152), so we don't want to disconnect
4496 : // the peer if the header turns out to be for an invalid block.
4497 : // Note that if a peer tries to build on an invalid chain, that
4498 : // will be detected and the peer will be disconnected/discouraged.
4499 0 : return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true);
4500 : }
4501 :
4502 0 : if (fBlockReconstructed) {
4503 : // If we got here, we were able to optimistically reconstruct a
4504 : // block that is in flight from some other peer.
4505 : {
4506 0 : LOCK(cs_main);
4507 0 : mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false));
4508 0 : }
4509 : // Setting force_processing to true means that we bypass some of
4510 : // our anti-DoS protections in AcceptBlock, which filters
4511 : // unrequested blocks that might be trying to waste our resources
4512 : // (eg disk space). Because we only try to reconstruct blocks when
4513 : // we're close to caught up (via the CanDirectFetch() requirement
4514 : // above, combined with the behavior of not requesting blocks until
4515 : // we have a chain with at least the minimum chain work), and we ignore
4516 : // compact blocks with less work than our tip, it is safe to treat
4517 : // reconstructed compact blocks as having been requested.
4518 0 : ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true);
4519 0 : LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
4520 0 : if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
4521 : // Clear download state for this block, which is in
4522 : // process from some other peer. We do this after calling
4523 : // ProcessNewBlock so that a malleated cmpctblock announcement
4524 : // can't be used to interfere with block relay.
4525 0 : RemoveBlockRequest(pblock->GetHash(), std::nullopt);
4526 0 : }
4527 0 : }
4528 0 : return;
4529 0 : }
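// After initialising a PartiallyDownloadedBlock, the code above requests
// exactly the transaction indexes it could not fill from the mempool or the
// extra-transactions pool. A hedged sketch of building that request
// (hypothetical names, illustration only; the real message is BIP 152's
// getblocktxn):
//
//     #include <cstddef>
//     #include <vector>
//
//     // availability[i] is true if transaction i was reconstructed locally.
//     std::vector<size_t> MissingIndexes(const std::vector<bool>& availability)
//     {
//         std::vector<size_t> missing;
//         for (size_t i = 0; i < availability.size(); ++i) {
//             if (!availability[i]) missing.push_back(i);
//         }
//         return missing; // empty => the block can be reconstructed immediately
//     }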
4530 :
4531 0 : if (msg_type == NetMsgType::BLOCKTXN)
4532 : {
4533 : // Ignore blocktxn received while importing
4534 0 : if (m_chainman.m_blockman.LoadingBlocks()) {
4535 0 : LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId());
4536 0 : return;
4537 : }
4538 :
4539 0 : BlockTransactions resp;
4540 0 : vRecv >> resp;
4541 :
4542 0 : return ProcessCompactBlockTxns(pfrom, *peer, resp);
4543 0 : }
4544 :
4545 0 : if (msg_type == NetMsgType::HEADERS)
4546 : {
4547 : // Ignore headers received while importing
4548 0 : if (m_chainman.m_blockman.LoadingBlocks()) {
4549 0 : LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId());
4550 0 : return;
4551 : }
4552 :
4553 : // Assume that this is in response to any outstanding getheaders
4554 : // request we may have sent, and clear out the time of our last request
4555 0 : peer->m_last_getheaders_timestamp = {};
4556 :
4557 0 : std::vector<CBlockHeader> headers;
4558 :
4559 : // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
4560 0 : unsigned int nCount = ReadCompactSize(vRecv);
4561 0 : if (nCount > MAX_HEADERS_RESULTS) {
4562 0 : Misbehaving(*peer, 20, strprintf("headers message size = %u", nCount));
4563 0 : return;
4564 : }
4565 0 : headers.resize(nCount);
4566 0 : for (unsigned int n = 0; n < nCount; n++) {
4567 0 : vRecv >> headers[n];
4568 0 : ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
4569 0 : }
4570 :
4571 0 : ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false);
4572 :
4573 : // Check if the headers presync progress needs to be reported to validation.
4574 : // This needs to be done without holding the m_headers_presync_mutex lock.
4575 0 : if (m_headers_presync_should_signal.exchange(false)) {
4576 0 : HeadersPresyncStats stats;
4577 : {
4578 0 : LOCK(m_headers_presync_mutex);
4579 0 : auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
4580 0 : if (it != m_headers_presync_stats.end()) stats = it->second;
4581 0 : }
4582 0 : if (stats.second) {
4583 0 : m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second);
4584 0 : }
4585 0 : }
4586 :
4587 0 : return;
4588 0 : }
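// The headers handler above first reads the compact-size count, rejects
// anything above MAX_HEADERS_RESULTS, and only then deserializes the
// individual headers (ignoring the trailing tx count), so a peer cannot make
// us allocate for thousands of full blocks. A minimal sketch of that
// bounded-read pattern (hypothetical names, illustration only):
//
//     #include <cstddef>
//     #include <stdexcept>
//     #include <vector>
//
//     template <typename Item, typename ReadOne>
//     std::vector<Item> ReadBounded(size_t announced_count, size_t max_count, ReadOne read_one)
//     {
//         if (announced_count > max_count) throw std::runtime_error("oversized message");
//         std::vector<Item> items;
//         items.reserve(announced_count); // safe: bounded by max_count
//         for (size_t i = 0; i < announced_count; ++i) items.push_back(read_one());
//         return items;
//     }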
4589 :
4590 0 : if (msg_type == NetMsgType::BLOCK)
4591 : {
4592 : // Ignore block received while importing
4593 0 : if (m_chainman.m_blockman.LoadingBlocks()) {
4594 0 : LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId());
4595 0 : return;
4596 : }
4597 :
4598 0 : std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4599 0 : vRecv >> *pblock;
4600 :
4601 0 : LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId());
4602 :
4603 0 : bool forceProcessing = false;
4604 0 : const uint256 hash(pblock->GetHash());
4605 0 : bool min_pow_checked = false;
4606 : {
4607 0 : LOCK(cs_main);
4608 : // Always process the block if we requested it, since we may
4609 : // need it even when it's not a candidate for a new best tip.
4610 0 : forceProcessing = IsBlockRequested(hash);
4611 0 : RemoveBlockRequest(hash, pfrom.GetId());
4612 : // mapBlockSource is only used for punishing peers and setting
4613 : // which peers send us compact blocks, so the race between here and
4614 : // cs_main in ProcessNewBlock is fine.
4615 0 : mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
4616 :
4617 : // Check work on this block against our anti-dos thresholds.
4618 0 : const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock);
4619 0 : if (prev_block && prev_block->nChainWork + CalculateHeadersWork({pblock->GetBlockHeader()}) >= GetAntiDoSWorkThreshold()) {
4620 0 : min_pow_checked = true;
4621 0 : }
4622 0 : }
4623 0 : ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
4624 : return;
4625 0 : }
4626 :
4627 0 : if (msg_type == NetMsgType::GETADDR) {
4628 : // This asymmetric behavior for inbound and outbound connections was introduced
4629 : // to prevent a fingerprinting attack: an attacker can send specific fake addresses
4630 : // to users' AddrMan and later request them by sending getaddr messages.
4631 : // Making nodes which are behind NAT and can only make outgoing connections ignore
4632 : // the getaddr message mitigates the attack.
4633 0 : if (!pfrom.IsInboundConn()) {
4634 0 : LogPrint(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId());
4635 0 : return;
4636 : }
4637 :
4638 : // Since this must be an inbound connection, SetupAddressRelay will
4639 : // never fail.
4640 0 : Assume(SetupAddressRelay(pfrom, *peer));
4641 :
4642 : // Only send one GetAddr response per connection to reduce resource waste
4643 : // and discourage addr stamping of INV announcements.
4644 0 : if (peer->m_getaddr_recvd) {
4645 0 : LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId());
4646 0 : return;
4647 : }
4648 0 : peer->m_getaddr_recvd = true;
4649 :
4650 0 : peer->m_addrs_to_send.clear();
4651 0 : std::vector<CAddress> vAddr;
4652 0 : if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
4653 0 : vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt);
4654 0 : } else {
4655 0 : vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
4656 : }
4657 0 : FastRandomContext insecure_rand;
4658 0 : for (const CAddress &addr : vAddr) {
4659 0 : PushAddress(*peer, addr, insecure_rand);
4660 : }
4661 : return;
4662 0 : }
4663 :
4664 0 : if (msg_type == NetMsgType::MEMPOOL) {
4665 : // Only process received mempool messages if we advertise NODE_BLOOM
4666 : // or if the peer has mempool permissions.
4667 0 : if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4668 : {
4669 0 : if (!pfrom.HasPermission(NetPermissionFlags::NoBan))
4670 : {
4671 0 : LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId());
4672 0 : pfrom.fDisconnect = true;
4673 0 : }
4674 0 : return;
4675 : }
4676 :
4677 0 : if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool))
4678 : {
4679 0 : if (!pfrom.HasPermission(NetPermissionFlags::NoBan))
4680 : {
4681 0 : LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId());
4682 0 : pfrom.fDisconnect = true;
4683 0 : }
4684 0 : return;
4685 : }
4686 :
4687 0 : if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4688 0 : LOCK(tx_relay->m_tx_inventory_mutex);
4689 0 : tx_relay->m_send_mempool = true;
4690 0 : }
4691 0 : return;
4692 : }
4693 :
4694 0 : if (msg_type == NetMsgType::PING) {
4695 0 : if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
4696 0 : uint64_t nonce = 0;
4697 0 : vRecv >> nonce;
4698 : // Echo the message back with the nonce. This allows for two useful features:
4699 : //
4700 : // 1) A remote node can quickly check if the connection is operational
4701 : // 2) Remote nodes can measure the latency of the network thread. If this node
4702 : // is overloaded it won't respond to pings quickly and the remote node can
4703 : // avoid sending us more work, like chain download requests.
4704 : //
4705 : // The nonce stops the remote getting confused between different pings: without
4706 : // it, if the remote node sends a ping once per second and this node takes 5
4707 : // seconds to respond to each, the 5th ping the remote sends would appear to
4708 : // return very quickly.
4709 0 : m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
4710 0 : }
4711 0 : return;
4712 : }
4713 :
4714 0 : if (msg_type == NetMsgType::PONG) {
4715 0 : const auto ping_end = time_received;
4716 0 : uint64_t nonce = 0;
4717 0 : size_t nAvail = vRecv.in_avail();
4718 0 : bool bPingFinished = false;
4719 0 : std::string sProblem;
4720 :
4721 0 : if (nAvail >= sizeof(nonce)) {
4722 0 : vRecv >> nonce;
4723 :
4724 : // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
4725 0 : if (peer->m_ping_nonce_sent != 0) {
4726 0 : if (nonce == peer->m_ping_nonce_sent) {
4727 : // Matching pong received, this ping is no longer outstanding
4728 0 : bPingFinished = true;
4729 0 : const auto ping_time = ping_end - peer->m_ping_start.load();
4730 0 : if (ping_time.count() >= 0) {
4731 : // Let connman know about this successful ping-pong
4732 0 : pfrom.PongReceived(ping_time);
4733 0 : } else {
4734 : // This should never happen
4735 0 : sProblem = "Timing mishap";
4736 : }
4737 0 : } else {
4738 : // Nonce mismatches are normal when pings are overlapping
4739 0 : sProblem = "Nonce mismatch";
4740 0 : if (nonce == 0) {
4741 : // This is most likely a bug in another implementation somewhere; cancel this ping
4742 0 : bPingFinished = true;
4743 0 : sProblem = "Nonce zero";
4744 0 : }
4745 : }
4746 0 : } else {
4747 0 : sProblem = "Unsolicited pong without ping";
4748 : }
4749 0 : } else {
4750 : // This is most likely a bug in another implementation somewhere; cancel this ping
4751 0 : bPingFinished = true;
4752 0 : sProblem = "Short payload";
4753 : }
4754 :
4755 0 : if (!(sProblem.empty())) {
4756 0 : LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
4757 : pfrom.GetId(),
4758 : sProblem,
4759 : peer->m_ping_nonce_sent,
4760 : nonce,
4761 : nAvail);
4762 0 : }
4763 0 : if (bPingFinished) {
4764 0 : peer->m_ping_nonce_sent = 0;
4765 0 : }
4766 : return;
4767 0 : }
4768 :
4769 0 : if (msg_type == NetMsgType::FILTERLOAD) {
4770 0 : if (!(peer->m_our_services & NODE_BLOOM)) {
4771 0 : LogPrint(BCLog::NET, "filterload received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4772 0 : pfrom.fDisconnect = true;
4773 0 : return;
4774 : }
4775 0 : CBloomFilter filter;
4776 0 : vRecv >> filter;
4777 :
4778 0 : if (!filter.IsWithinSizeConstraints())
4779 : {
4780 : // There is no excuse for sending a too-large filter
4781 0 : Misbehaving(*peer, 100, "too-large bloom filter");
4782 0 : } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4783 : {
4784 0 : LOCK(tx_relay->m_bloom_filter_mutex);
4785 0 : tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
4786 0 : tx_relay->m_relay_txs = true;
4787 0 : }
4788 0 : pfrom.m_bloom_filter_loaded = true;
4789 0 : pfrom.m_relays_txs = true;
4790 0 : }
4791 : return;
4792 0 : }
4793 :
4794 0 : if (msg_type == NetMsgType::FILTERADD) {
4795 0 : if (!(peer->m_our_services & NODE_BLOOM)) {
4796 0 : LogPrint(BCLog::NET, "filteradd received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4797 0 : pfrom.fDisconnect = true;
4798 0 : return;
4799 : }
4800 0 : std::vector<unsigned char> vData;
4801 0 : vRecv >> vData;
4802 :
4803 : // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
4804 : // and thus, the maximum size any matched object can have) in a filteradd message
4805 0 : bool bad = false;
4806 0 : if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
4807 0 : bad = true;
4808 0 : } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4809 0 : LOCK(tx_relay->m_bloom_filter_mutex);
4810 0 : if (tx_relay->m_bloom_filter) {
4811 0 : tx_relay->m_bloom_filter->insert(vData);
4812 0 : } else {
4813 0 : bad = true;
4814 : }
4815 0 : }
4816 0 : if (bad) {
4817 0 : Misbehaving(*peer, 100, "bad filteradd message");
4818 0 : }
4819 : return;
4820 0 : }
4821 :
4822 0 : if (msg_type == NetMsgType::FILTERCLEAR) {
4823 0 : if (!(peer->m_our_services & NODE_BLOOM)) {
4824 0 : LogPrint(BCLog::NET, "filterclear received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId());
4825 0 : pfrom.fDisconnect = true;
4826 0 : return;
4827 : }
4828 0 : auto tx_relay = peer->GetTxRelay();
4829 0 : if (!tx_relay) return;
4830 :
4831 : {
4832 0 : LOCK(tx_relay->m_bloom_filter_mutex);
4833 0 : tx_relay->m_bloom_filter = nullptr;
4834 0 : tx_relay->m_relay_txs = true;
4835 0 : }
4836 0 : pfrom.m_bloom_filter_loaded = false;
4837 0 : pfrom.m_relays_txs = true;
4838 0 : return;
4839 : }
4840 :
4841 0 : if (msg_type == NetMsgType::FEEFILTER) {
4842 0 : CAmount newFeeFilter = 0;
4843 0 : vRecv >> newFeeFilter;
4844 0 : if (MoneyRange(newFeeFilter)) {
4845 0 : if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
4846 0 : tx_relay->m_fee_filter_received = newFeeFilter;
4847 0 : }
4848 0 : LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
4849 0 : }
4850 0 : return;
4851 : }
4852 :
4853 0 : if (msg_type == NetMsgType::GETCFILTERS) {
4854 0 : ProcessGetCFilters(pfrom, *peer, vRecv);
4855 0 : return;
4856 : }
4857 :
4858 0 : if (msg_type == NetMsgType::GETCFHEADERS) {
4859 0 : ProcessGetCFHeaders(pfrom, *peer, vRecv);
4860 0 : return;
4861 : }
4862 :
4863 0 : if (msg_type == NetMsgType::GETCFCHECKPT) {
4864 0 : ProcessGetCFCheckPt(pfrom, *peer, vRecv);
4865 0 : return;
4866 : }
4867 :
4868 0 : if (msg_type == NetMsgType::NOTFOUND) {
4869 0 : std::vector<CInv> vInv;
4870 0 : vRecv >> vInv;
4871 0 : if (vInv.size() <= MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4872 0 : LOCK(::cs_main);
4873 0 : for (CInv &inv : vInv) {
4874 0 : if (inv.IsGenTxMsg()) {
4875 : // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as
4876 : // completed in TxRequestTracker.
4877 0 : m_txrequest.ReceivedResponse(pfrom.GetId(), inv.hash);
4878 0 : }
4879 : }
4880 0 : }
4881 : return;
4882 0 : }
4883 :
4884 : // Ignore unknown commands for extensibility
4885 0 : LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
4886 0 : return;
4887 0 : }
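// A minimal sketch (not part of this file) of the nonce-matched ping/pong RTT
// measurement handled by the PING/PONG branches above: remember the nonce and
// start time when a ping goes out, and only a pong echoing the same nonce
// completes the measurement. All names below are illustrative.
#include <chrono>
#include <cstdint>
#include <optional>

struct PingStateSketch {
    uint64_t nonce_sent{0}; // 0 means no ping outstanding
    std::chrono::steady_clock::time_point start{};
};

// Returns the round-trip time if the pong matches the outstanding ping,
// std::nullopt for unsolicited or overlapping pongs.
inline std::optional<std::chrono::microseconds> OnPongSketch(PingStateSketch& s, uint64_t nonce_received)
{
    if (s.nonce_sent == 0 || nonce_received != s.nonce_sent) return std::nullopt;
    s.nonce_sent = 0; // ping is no longer outstanding
    return std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - s.start);
}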
4888 :
4889 0 : bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
4890 : {
4891 : {
4892 0 : LOCK(peer.m_misbehavior_mutex);
4893 :
4894 : // There's nothing to do if the m_should_discourage flag isn't set
4895 0 : if (!peer.m_should_discourage) return false;
4896 :
4897 0 : peer.m_should_discourage = false;
4898 0 : } // peer.m_misbehavior_mutex
4899 :
4900 0 : if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
4901 : // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission
4902 0 : LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
4903 0 : return false;
4904 : }
4905 :
4906 0 : if (pnode.IsManualConn()) {
4907 : // We never disconnect or discourage manual peers for bad behavior
4908 0 : LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id);
4909 0 : return false;
4910 : }
4911 :
4912 0 : if (pnode.addr.IsLocal()) {
4913 : // We disconnect local peers for bad behavior but don't discourage (since that would discourage
4914 : // all peers on the same local address)
4915 0 : LogPrint(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n",
4916 : pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
4917 0 : pnode.fDisconnect = true;
4918 0 : return true;
4919 : }
4920 :
4921 : // Normal case: Disconnect the peer and discourage all nodes sharing the address
4922 0 : LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id);
4923 0 : if (m_banman) m_banman->Discourage(pnode.addr);
4924 0 : m_connman.DisconnectNode(pnode.addr);
4925 0 : return true;
4926 0 : }
4927 :
4928 0 : bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
4929 : {
4930 0 : AssertLockHeld(g_msgproc_mutex);
4931 :
4932 0 : PeerRef peer = GetPeerRef(pfrom->GetId());
4933 0 : if (peer == nullptr) return false;
4934 :
4935 : {
4936 0 : LOCK(peer->m_getdata_requests_mutex);
4937 0 : if (!peer->m_getdata_requests.empty()) {
4938 0 : ProcessGetData(*pfrom, *peer, interruptMsgProc);
4939 0 : }
4940 0 : }
4941 :
4942 0 : const bool processed_orphan = ProcessOrphanTx(*peer);
4943 :
4944 0 : if (pfrom->fDisconnect)
4945 0 : return false;
4946 :
4947 0 : if (processed_orphan) return true;
4948 :
4949 : // this maintains the order of responses
4950 :         // and prevents m_getdata_requests from growing unbounded
4951 : {
4952 0 : LOCK(peer->m_getdata_requests_mutex);
4953 0 : if (!peer->m_getdata_requests.empty()) return true;
4954 0 : }
4955 :
4956 : // Don't bother if send buffer is too full to respond anyway
4957 0 : if (pfrom->fPauseSend) return false;
4958 :
4959 0 : auto poll_result{pfrom->PollMessage()};
4960 0 : if (!poll_result) {
4961 : // No message to process
4962 0 : return false;
4963 : }
4964 :
4965 0 : CNetMessage& msg{poll_result->first};
4966 0 : bool fMoreWork = poll_result->second;
4967 :
4968 : TRACE6(net, inbound_message,
4969 : pfrom->GetId(),
4970 : pfrom->m_addr_name.c_str(),
4971 : pfrom->ConnectionTypeAsString().c_str(),
4972 : msg.m_type.c_str(),
4973 : msg.m_recv.size(),
4974 : msg.m_recv.data()
4975 : );
4976 :
4977 0 : if (m_opts.capture_messages) {
4978 0 : CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
4979 0 : }
4980 :
4981 0 : msg.SetVersion(pfrom->GetCommonVersion());
4982 :
4983 : try {
4984 0 : ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
4985 0 : if (interruptMsgProc) return false;
4986 : {
4987 0 : LOCK(peer->m_getdata_requests_mutex);
4988 0 : if (!peer->m_getdata_requests.empty()) fMoreWork = true;
4989 0 : }
4990 :             // Does this peer have an orphan ready to reconsider?
4991 : // (Note: we may have provided a parent for an orphan provided
4992 : // by another peer that was already processed; in that case,
4993 : // the extra work may not be noticed, possibly resulting in an
4994 : // unnecessary 100ms delay)
4995 0 : if (m_orphanage.HaveTxToReconsider(peer->m_id)) fMoreWork = true;
4996 0 : } catch (const std::exception& e) {
4997 0 : LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
4998 0 : } catch (...) {
4999 0 : LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
5000 0 : }
5001 :
5002 0 : return fMoreWork;
5003 0 : }
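// A minimal sketch (not part of this file) of the one-message-per-call shape
// of ProcessMessages() above: handle at most one queued message and tell the
// caller whether more work remains for this peer. Names are illustrative.
#include <deque>
#include <string>

struct PeerQueueSketch {
    std::deque<std::string> messages;
};

// Handles one message (if any) and returns true if more remain.
template <typename Handler>
bool ProcessOneSketch(PeerQueueSketch& q, Handler&& handle)
{
    if (q.messages.empty()) return false;
    handle(q.messages.front());
    q.messages.pop_front();
    return !q.messages.empty();
}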
5004 :
5005 0 : void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds)
5006 : {
5007 0 : AssertLockHeld(cs_main);
5008 :
5009 0 : CNodeState &state = *State(pto.GetId());
5010 :
5011 0 : if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
5012 : // This is an outbound peer subject to disconnection if they don't
5013 : // announce a block with as much work as the current tip within
5014 : // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
5015 : // their chain has more work than ours, we should sync to it,
5016 : // unless it's invalid, in which case we should find that out and
5017 : // disconnect from them elsewhere).
5018 0 : if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
5019 0 : if (state.m_chain_sync.m_timeout != 0s) {
5020 0 : state.m_chain_sync.m_timeout = 0s;
5021 0 : state.m_chain_sync.m_work_header = nullptr;
5022 0 : state.m_chain_sync.m_sent_getheaders = false;
5023 0 : }
5024 0 : } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
5025 :             // The best block known by this peer is behind our tip, and we're either noticing
5026 : // that for the first time, OR this peer was able to catch up to some earlier point
5027 : // where we checked against our tip.
5028 : // Either way, set a new timeout based on current tip.
5029 0 : state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
5030 0 : state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
5031 0 : state.m_chain_sync.m_sent_getheaders = false;
5032 0 : } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
5033 : // No evidence yet that our peer has synced to a chain with work equal to that
5034 : // of our tip, when we first detected it was behind. Send a single getheaders
5035 : // message to give the peer a chance to update us.
5036 0 : if (state.m_chain_sync.m_sent_getheaders) {
5037 : // They've run out of time to catch up!
5038 0 : LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
5039 0 : pto.fDisconnect = true;
5040 0 : } else {
5041 0 : assert(state.m_chain_sync.m_work_header);
5042 : // Here, we assume that the getheaders message goes out,
5043 : // because it'll either go out or be skipped because of a
5044 : // getheaders in-flight already, in which case the peer should
5045 : // still respond to us with a sufficiently high work chain tip.
5046 0 : MaybeSendGetHeaders(pto,
5047 0 : GetLocator(state.m_chain_sync.m_work_header->pprev),
5048 0 : peer);
5049 0 : LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
5050 0 : state.m_chain_sync.m_sent_getheaders = true;
5051 : // Bump the timeout to allow a response, which could clear the timeout
5052 : // (if the response shows the peer has synced), reset the timeout (if
5053 : // the peer syncs to the required work but not to our tip), or result
5054 : // in disconnect (if we advance to the timeout and pindexBestKnownBlock
5055 : // has not sufficiently progressed)
5056 0 : state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
5057 : }
5058 0 : }
5059 0 : }
5060 0 : }
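// A minimal sketch (not part of this file) of the two-stage timeout applied by
// ConsiderEviction() above: when the peer first looks behind, record a
// deadline; once it passes, send a single getheaders and allow one response
// window; if that also expires without the peer catching up, disconnect. The
// reset on reaching the recorded work header is omitted for brevity.
#include <chrono>

struct ChainSyncSketch {
    std::chrono::seconds timeout{0};
    bool sent_getheaders{false};
};

enum class SyncActionSketch { None, SendGetHeaders, Disconnect };

inline SyncActionSketch EvaluateSketch(ChainSyncSketch& s, bool peer_caught_up, std::chrono::seconds now,
                                       std::chrono::seconds chain_sync_timeout, std::chrono::seconds response_time)
{
    if (peer_caught_up) { s = ChainSyncSketch{}; return SyncActionSketch::None; } // clear pending deadline
    if (s.timeout == std::chrono::seconds{0}) { s.timeout = now + chain_sync_timeout; return SyncActionSketch::None; }
    if (now <= s.timeout) return SyncActionSketch::None;
    if (s.sent_getheaders) return SyncActionSketch::Disconnect; // out of time to catch up
    s.sent_getheaders = true;
    s.timeout = now + response_time; // bump to allow a response
    return SyncActionSketch::SendGetHeaders;
}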
5061 :
5062 0 : void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
5063 : {
5064 : // If we have any extra block-relay-only peers, disconnect the youngest unless
5065 : // it's given us a block -- in which case, compare with the second-youngest, and
5066 : // out of those two, disconnect the peer who least recently gave us a block.
5067 : // The youngest block-relay-only peer would be the extra peer we connected
5068 : // to temporarily in order to sync our tip; see net.cpp.
5069 : // Note that we use higher nodeid as a measure for most recent connection.
5070 0 : if (m_connman.GetExtraBlockRelayCount() > 0) {
5071 0 : std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};
5072 :
5073 0 : m_connman.ForEachNode([&](CNode* pnode) {
5074 0 : if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
5075 0 : if (pnode->GetId() > youngest_peer.first) {
5076 0 : next_youngest_peer = youngest_peer;
5077 0 : youngest_peer.first = pnode->GetId();
5078 0 : youngest_peer.second = pnode->m_last_block_time;
5079 0 : }
5080 0 : });
5081 0 : NodeId to_disconnect = youngest_peer.first;
5082 0 : if (youngest_peer.second > next_youngest_peer.second) {
5083 : // Our newest block-relay-only peer gave us a block more recently;
5084 : // disconnect our second youngest.
5085 0 : to_disconnect = next_youngest_peer.first;
5086 0 : }
5087 0 : m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
5088 0 : AssertLockHeld(::cs_main);
5089 : // Make sure we're not getting a block right now, and that
5090 : // we've been connected long enough for this eviction to happen
5091 : // at all.
5092 : // Note that we only request blocks from a peer if we learn of a
5093 : // valid headers chain with at least as much work as our tip.
5094 0 : CNodeState *node_state = State(pnode->GetId());
5095 0 : if (node_state == nullptr ||
5096 0 : (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) {
5097 0 : pnode->fDisconnect = true;
5098 0 : LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
5099 : pnode->GetId(), count_seconds(pnode->m_last_block_time));
5100 0 : return true;
5101 : } else {
5102 0 : LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5103 : pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size());
5104 : }
5105 0 : return false;
5106 0 : });
5107 0 : }
5108 :
5109 : // Check whether we have too many outbound-full-relay peers
5110 0 : if (m_connman.GetExtraFullOutboundCount() > 0) {
5111 : // If we have more outbound-full-relay peers than we target, disconnect one.
5112 : // Pick the outbound-full-relay peer that least recently announced
5113 :         // a new block to us, with ties broken by choosing the more recent
5114 : // connection (higher node id)
5115 : // Protect peers from eviction if we don't have another connection
5116 : // to their network, counting both outbound-full-relay and manual peers.
5117 0 : NodeId worst_peer = -1;
5118 0 : int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
5119 :
5120 0 : m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) {
5121 0 : AssertLockHeld(::cs_main);
5122 :
5123 : // Only consider outbound-full-relay peers that are not already
5124 : // marked for disconnection
5125 0 : if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
5126 0 : CNodeState *state = State(pnode->GetId());
5127 0 : if (state == nullptr) return; // shouldn't be possible, but just in case
5128 : // Don't evict our protected peers
5129 0 : if (state->m_chain_sync.m_protect) return;
5130 : // If this is the only connection on a particular network that is
5131 : // OUTBOUND_FULL_RELAY or MANUAL, protect it.
5132 0 : if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return;
5133 0 : if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
5134 0 : worst_peer = pnode->GetId();
5135 0 : oldest_block_announcement = state->m_last_block_announcement;
5136 0 : }
5137 0 : });
5138 0 : if (worst_peer != -1) {
5139 0 : bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
5140 0 : AssertLockHeld(::cs_main);
5141 :
5142 : // Only disconnect a peer that has been connected to us for
5143 : // some reasonable fraction of our check-frequency, to give
5144 : // it time for new information to have arrived.
5145 : // Also don't disconnect any peer we're trying to download a
5146 : // block from.
5147 0 : CNodeState &state = *State(pnode->GetId());
5148 0 : if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) {
5149 0 : LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
5150 0 : pnode->fDisconnect = true;
5151 0 : return true;
5152 : } else {
5153 0 : LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
5154 : pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size());
5155 0 : return false;
5156 : }
5157 0 : });
5158 0 : if (disconnected) {
5159 : // If we disconnected an extra peer, that means we successfully
5160 : // connected to at least one peer after the last time we
5161 : // detected a stale tip. Don't try any more extra peers until
5162 : // we next detect a stale tip, to limit the load we put on the
5163 : // network from these extra connections.
5164 0 : m_connman.SetTryNewOutboundPeer(false);
5165 0 : }
5166 0 : }
5167 0 : }
5168 0 : }
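// A minimal sketch (not part of this file) of the block-relay-only eviction
// rule above: of the two most recently connected block-relay-only peers,
// disconnect whichever gave us a block least recently. Names are illustrative.
#include <chrono>
#include <utility>

using RelayPeerSketch = std::pair<int, std::chrono::seconds>; // {node id, last block time}

inline int ChooseEvictionCandidateSketch(const RelayPeerSketch& youngest, const RelayPeerSketch& next_youngest)
{
    // Keep whichever peer delivered a block more recently.
    return (youngest.second > next_youngest.second) ? next_youngest.first : youngest.first;
}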
5169 :
5170 0 : void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
5171 : {
5172 0 : LOCK(cs_main);
5173 :
5174 0 : auto now{GetTime<std::chrono::seconds>()};
5175 :
5176 0 : EvictExtraOutboundPeers(now);
5177 :
5178 0 : if (now > m_stale_tip_check_time) {
5179 : // Check whether our tip is stale, and if so, allow using an extra
5180 : // outbound peer
5181 0 : if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
5182 0 : LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
5183 : count_seconds(now - m_last_tip_update.load()));
5184 0 : m_connman.SetTryNewOutboundPeer(true);
5185 0 : } else if (m_connman.GetTryNewOutboundPeer()) {
5186 0 : m_connman.SetTryNewOutboundPeer(false);
5187 0 : }
5188 0 : m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
5189 0 : }
5190 :
5191 0 : if (!m_initial_sync_finished && CanDirectFetch()) {
5192 0 : m_connman.StartExtraBlockRelayPeers();
5193 0 : m_initial_sync_finished = true;
5194 0 : }
5195 0 : }
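// A minimal sketch (not part of this file) of the stale-tip test that gates
// SetTryNewOutboundPeer() above: treat the tip as potentially stale when no
// block has updated it for several expected block intervals. The factor of 3
// is an assumption for illustration, not the value used by TipMayBeStale().
#include <chrono>

inline bool TipMayBeStaleSketch(std::chrono::seconds now, std::chrono::seconds last_tip_update,
                                std::chrono::seconds block_interval)
{
    return now - last_tip_update > 3 * block_interval;
}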
5196 :
5197 0 : void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now)
5198 : {
5199 0 : if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
5200 0 : peer.m_ping_nonce_sent &&
5201 0 : now > peer.m_ping_start.load() + TIMEOUT_INTERVAL)
5202 : {
5203 : // The ping timeout is using mocktime. To disable the check during
5204 : // testing, increase -peertimeout.
5205 0 : LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id);
5206 0 : node_to.fDisconnect = true;
5207 0 : return;
5208 : }
5209 :
5210 0 : const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
5211 0 : bool pingSend = false;
5212 :
5213 0 : if (peer.m_ping_queued) {
5214 : // RPC ping request by user
5215 0 : pingSend = true;
5216 0 : }
5217 :
5218 0 : if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) {
5219 : // Ping automatically sent as a latency probe & keepalive.
5220 0 : pingSend = true;
5221 0 : }
5222 :
5223 0 : if (pingSend) {
5224 : uint64_t nonce;
5225 0 : do {
5226 0 : nonce = GetRand<uint64_t>();
5227 0 : } while (nonce == 0);
5228 0 : peer.m_ping_queued = false;
5229 0 : peer.m_ping_start = now;
5230 0 : if (node_to.GetCommonVersion() > BIP0031_VERSION) {
5231 0 : peer.m_ping_nonce_sent = nonce;
5232 0 : m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING, nonce));
5233 0 : } else {
5234 : // Peer is too old to support ping command with nonce, pong will never arrive.
5235 0 : peer.m_ping_nonce_sent = 0;
5236 0 : m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
5237 : }
5238 0 : }
5239 0 : }
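// A minimal sketch (not part of this file) of the nonce draw in MaybeSendPing()
// above: keep rerolling until the value is non-zero, since zero is reserved to
// mean "no ping outstanding".
#include <cstdint>
#include <random>

inline uint64_t NonZeroNonceSketch()
{
    static std::mt19937_64 rng{std::random_device{}()};
    uint64_t nonce;
    do { nonce = rng(); } while (nonce == 0); // zero is the sentinel value
    return nonce;
}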
5240 :
5241 0 : void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time)
5242 : {
5243 : // Nothing to do for non-address-relay peers
5244 0 : if (!peer.m_addr_relay_enabled) return;
5245 :
5246 0 : LOCK(peer.m_addr_send_times_mutex);
5247 : // Periodically advertise our local address to the peer.
5248 0 : if (fListen && !m_chainman.IsInitialBlockDownload() &&
5249 0 : peer.m_next_local_addr_send < current_time) {
5250 : // If we've sent before, clear the bloom filter for the peer, so that our
5251 : // self-announcement will actually go out.
5252 : // This might be unnecessary if the bloom filter has already rolled
5253 : // over since our last self-announcement, but there is only a small
5254 : // bandwidth cost that we can incur by doing this (which happens
5255 : // once a day on average).
5256 0 : if (peer.m_next_local_addr_send != 0us) {
5257 0 : peer.m_addr_known->reset();
5258 0 : }
5259 0 : if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
5260 0 : CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()};
5261 0 : FastRandomContext insecure_rand;
5262 0 : PushAddress(peer, local_addr, insecure_rand);
5263 0 : }
5264 0 : peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
5265 0 : }
5266 :
5267 : // We sent an `addr` message to this peer recently. Nothing more to do.
5268 0 : if (current_time <= peer.m_next_addr_send) return;
5269 :
5270 0 : peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
5271 :
5272 0 : if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
5273 : // Should be impossible since we always check size before adding to
5274 : // m_addrs_to_send. Recover by trimming the vector.
5275 0 : peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND);
5276 0 : }
5277 :
5278 : // Remove addr records that the peer already knows about, and add new
5279 : // addrs to the m_addr_known filter on the same pass.
5280 0 : auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
5281 0 : bool ret = peer.m_addr_known->contains(addr.GetKey());
5282 0 : if (!ret) peer.m_addr_known->insert(addr.GetKey());
5283 0 : return ret;
5284 0 : };
5285 0 : peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known),
5286 0 : peer.m_addrs_to_send.end());
5287 :
5288 : // No addr messages to send
5289 0 : if (peer.m_addrs_to_send.empty()) return;
5290 :
5291 : const char* msg_type;
5292 : CNetAddr::Encoding ser_enc;
5293 0 : if (peer.m_wants_addrv2) {
5294 0 : msg_type = NetMsgType::ADDRV2;
5295 0 : ser_enc = CNetAddr::Encoding::V2;
5296 0 : } else {
5297 0 : msg_type = NetMsgType::ADDR;
5298 0 : ser_enc = CNetAddr::Encoding::V1;
5299 : }
5300 0 : m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(msg_type, WithParams(CAddress::SerParams{{ser_enc}, CAddress::Format::Network}, peer.m_addrs_to_send)));
5301 0 : peer.m_addrs_to_send.clear();
5302 :
5303 : // we only send the big addr message once
5304 0 : if (peer.m_addrs_to_send.capacity() > 40) {
5305 0 : peer.m_addrs_to_send.shrink_to_fit();
5306 0 : }
5307 0 : }
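// A minimal sketch (not part of this file) of the single-pass filter in
// MaybeSendAddr() above: drop addresses the peer already knows and record the
// rest as known while filtering. An unordered_set stands in for the rolling
// bloom filter used by the real code.
#include <algorithm>
#include <string>
#include <unordered_set>
#include <vector>

inline void FilterAlreadyKnownSketch(std::vector<std::string>& to_send,
                                     std::unordered_set<std::string>& known)
{
    auto already_known = [&known](const std::string& addr) {
        const bool seen = known.count(addr) > 0;
        if (!seen) known.insert(addr); // mark as known on the same pass
        return seen;
    };
    to_send.erase(std::remove_if(to_send.begin(), to_send.end(), already_known), to_send.end());
}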
5308 :
5309 0 : void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer)
5310 : {
5311 : // Delay sending SENDHEADERS (BIP 130) until we're done with an
5312 : // initial-headers-sync with this peer. Receiving headers announcements for
5313 : // new blocks while trying to sync their headers chain is problematic,
5314 : // because of the state tracking done.
5315 0 : if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) {
5316 0 : LOCK(cs_main);
5317 0 : CNodeState &state = *State(node.GetId());
5318 0 : if (state.pindexBestKnownBlock != nullptr &&
5319 0 : state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) {
5320 : // Tell our peer we prefer to receive headers rather than inv's
5321 : // We send this to non-NODE NETWORK peers as well, because even
5322 : // non-NODE NETWORK peers can announce blocks (such as pruning
5323 : // nodes)
5324 0 : m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(NetMsgType::SENDHEADERS));
5325 0 : peer.m_sent_sendheaders = true;
5326 0 : }
5327 0 : }
5328 0 : }
5329 :
5330 0 : void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time)
5331 : {
5332 0 : if (m_opts.ignore_incoming_txs) return;
5333 0 : if (pto.GetCommonVersion() < FEEFILTER_VERSION) return;
5334 : // peers with the forcerelay permission should not filter txs to us
5335 0 : if (pto.HasPermission(NetPermissionFlags::ForceRelay)) return;
5336 : // Don't send feefilter messages to outbound block-relay-only peers since they should never announce
5337 : // transactions to us, regardless of feefilter state.
5338 0 : if (pto.IsBlockOnlyConn()) return;
5339 :
5340 0 : CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK();
5341 0 : static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}};
5342 :
5343 0 : if (m_chainman.IsInitialBlockDownload()) {
5344 : // Received tx-inv messages are discarded when the active
5345 : // chainstate is in IBD, so tell the peer to not send them.
5346 0 : currentFilter = MAX_MONEY;
5347 0 : } else {
5348 0 : static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
5349 0 : if (peer.m_fee_filter_sent == MAX_FILTER) {
5350 : // Send the current filter if we sent MAX_FILTER previously
5351 : // and made it out of IBD.
5352 0 : peer.m_next_send_feefilter = 0us;
5353 0 : }
5354 : }
5355 0 : if (current_time > peer.m_next_send_feefilter) {
5356 0 : CAmount filterToSend = g_filter_rounder.round(currentFilter);
5357 : // We always have a fee filter of at least the min relay fee
5358 0 : filterToSend = std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
5359 0 : if (filterToSend != peer.m_fee_filter_sent) {
5360 0 : m_connman.PushMessage(&pto, CNetMsgMaker(pto.GetCommonVersion()).Make(NetMsgType::FEEFILTER, filterToSend));
5361 0 : peer.m_fee_filter_sent = filterToSend;
5362 0 : }
5363 0 : peer.m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
5364 0 : }
5365 : // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
5366 : // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
5367 0 : else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter &&
5368 0 : (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
5369 0 : peer.m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
5370 0 : }
5371 0 : }
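// A minimal sketch (not part of this file) of the randomized broadcast timing
// used by the GetExponentialRand() callers above (addr and feefilter sends):
// drawing the delay from an exponential distribution makes send times
// Poisson-distributed, which obscures exactly when a given entry arrived.
#include <chrono>
#include <cstdint>
#include <random>

inline std::chrono::microseconds NextPoissonDelaySketch(std::chrono::microseconds average_interval)
{
    static std::mt19937_64 rng{std::random_device{}()};
    std::exponential_distribution<double> dist(1.0 / static_cast<double>(average_interval.count()));
    return std::chrono::microseconds{static_cast<int64_t>(dist(rng))};
}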
5372 :
5373 : namespace {
5374 : class CompareInvMempoolOrder
5375 : {
5376 : CTxMemPool* mp;
5377 : bool m_wtxid_relay;
5378 : public:
5379 0 : explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid)
5380 : {
5381 0 : mp = _mempool;
5382 0 : m_wtxid_relay = use_wtxid;
5383 0 : }
5384 :
5385 0 : bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
5386 : {
5387 : /* As std::make_heap produces a max-heap, we want the entries with the
5388 : * fewest ancestors/highest fee to sort later. */
5389 0 : return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay);
5390 : }
5391 : };
5392 : } // namespace
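// A minimal sketch (not part of this file) of the heap usage that
// CompareInvMempoolOrder above enables: std::make_heap is linear and each
// std::pop_heap is logarithmic, so announcing a small batch never pays for
// sorting the entire inventory set.
#include <algorithm>
#include <cstddef>
#include <vector>

// Pops up to `count` best elements (per `cmp`) without fully sorting `items`.
template <typename T, typename Cmp>
std::vector<T> TopNSketch(std::vector<T> items, size_t count, Cmp cmp)
{
    std::make_heap(items.begin(), items.end(), cmp); // O(n)
    std::vector<T> out;
    while (!items.empty() && out.size() < count) {
        std::pop_heap(items.begin(), items.end(), cmp); // moves the best element to the back
        out.push_back(std::move(items.back()));
        items.pop_back();
    }
    return out;
}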
5393 :
5394 0 : bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const
5395 : {
5396 : // block-relay-only peers may never send txs to us
5397 0 : if (peer.IsBlockOnlyConn()) return true;
5398 0 : if (peer.IsFeelerConn()) return true;
5399 : // In -blocksonly mode, peers need the 'relay' permission to send txs to us
5400 0 : if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true;
5401 0 : return false;
5402 0 : }
5403 :
5404 0 : bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer)
5405 : {
5406 : // We don't participate in addr relay with outbound block-relay-only
5407 : // connections to prevent providing adversaries with the additional
5408 : // information of addr traffic to infer the link.
5409 0 : if (node.IsBlockOnlyConn()) return false;
5410 :
5411 0 : if (!peer.m_addr_relay_enabled.exchange(true)) {
5412 : // During version message processing (non-block-relay-only outbound peers)
5413 : // or on first addr-related message we have received (inbound peers), initialize
5414 : // m_addr_known.
5415 0 : peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
5416 0 : }
5417 :
5418 0 : return true;
5419 0 : }
5420 :
5421 0 : bool PeerManagerImpl::SendMessages(CNode* pto)
5422 : {
5423 0 : AssertLockHeld(g_msgproc_mutex);
5424 :
5425 0 : PeerRef peer = GetPeerRef(pto->GetId());
5426 0 : if (!peer) return false;
5427 0 : const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
5428 :
5429 : // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
5430 : // disconnect misbehaving peers even before the version handshake is complete.
5431 0 : if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true;
5432 :
5433 : // Don't send anything until the version handshake is complete
5434 0 : if (!pto->fSuccessfullyConnected || pto->fDisconnect)
5435 0 : return true;
5436 :
5437 : // If we get here, the outgoing message serialization version is set and can't change.
5438 0 : const CNetMsgMaker msgMaker(pto->GetCommonVersion());
5439 :
5440 0 : const auto current_time{GetTime<std::chrono::microseconds>()};
5441 :
5442 0 : if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
5443 0 : LogPrint(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId());
5444 0 : pto->fDisconnect = true;
5445 0 : return true;
5446 : }
5447 :
5448 0 : MaybeSendPing(*pto, *peer, current_time);
5449 :
5450 : // MaybeSendPing may have marked peer for disconnection
5451 0 : if (pto->fDisconnect) return true;
5452 :
5453 0 : MaybeSendAddr(*pto, *peer, current_time);
5454 :
5455 0 : MaybeSendSendHeaders(*pto, *peer);
5456 :
5457 : {
5458 0 : LOCK(cs_main);
5459 :
5460 0 : CNodeState &state = *State(pto->GetId());
5461 :
5462 : // Start block sync
5463 0 : if (m_chainman.m_best_header == nullptr) {
5464 0 : m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
5465 0 : }
5466 :
5467 : // Determine whether we might try initial headers sync or parallel
5468 : // block download from this peer -- this mostly affects behavior while
5469 : // in IBD (once out of IBD, we sync from all peers).
5470 0 : bool sync_blocks_and_headers_from_peer = false;
5471 0 : if (state.fPreferredDownload) {
5472 0 : sync_blocks_and_headers_from_peer = true;
5473 0 : } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
5474 : // Typically this is an inbound peer. If we don't have any outbound
5475 : // peers, or if we aren't downloading any blocks from such peers,
5476 : // then allow block downloads from this peer, too.
5477 : // We prefer downloading blocks from outbound peers to avoid
5478 : // putting undue load on (say) some home user who is just making
5479 : // outbound connections to the network, but if our only source of
5480 : // the latest blocks is from an inbound peer, we have to be sure to
5481 : // eventually download it (and not just wait indefinitely for an
5482 : // outbound peer to have it).
5483 0 : if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) {
5484 0 : sync_blocks_and_headers_from_peer = true;
5485 0 : }
5486 0 : }
5487 :
5488 0 : if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) {
5489 : // Only actively request headers from a single peer, unless we're close to today.
5490 0 : if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
5491 0 : const CBlockIndex* pindexStart = m_chainman.m_best_header;
5492 : /* If possible, start at the block preceding the currently
5493 : best known header. This ensures that we always get a
5494 : non-empty list of headers back as long as the peer
5495 : is up-to-date. With a non-empty response, we can initialise
5496 : the peer's known best block. This wouldn't be possible
5497 : if we requested starting at m_chainman.m_best_header and
5498 : got back an empty response. */
5499 0 : if (pindexStart->pprev)
5500 0 : pindexStart = pindexStart->pprev;
5501 0 : if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
5502 0 : LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
5503 :
5504 0 : state.fSyncStarted = true;
5505 0 : peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
5506 : (
5507 : // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
5508 : // to maintain precision
5509 0 : std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
5510 0 : Ticks<std::chrono::seconds>(GetAdjustedTime() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing
5511 : );
5512 0 : nSyncStarted++;
5513 0 : }
5514 0 : }
5515 0 : }
5516 :
5517 : //
5518 : // Try sending block announcements via headers
5519 : //
5520 : {
5521 : // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our
5522 : // list of block hashes we're relaying, and our peer wants
5523 : // headers announcements, then find the first header
5524 : // not yet known to our peer but would connect, and send.
5525 : // If no header would connect, or if we have too many
5526 : // blocks, or if the peer doesn't want headers, just
5527 : // add all to the inv queue.
5528 0 : LOCK(peer->m_block_inv_mutex);
5529 0 : std::vector<CBlock> vHeaders;
5530 0 : bool fRevertToInv = ((!peer->m_prefers_headers &&
5531 0 : (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
5532 0 : peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
5533 0 : const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
5534 0 : ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
5535 :
5536 0 : if (!fRevertToInv) {
5537 0 : bool fFoundStartingHeader = false;
5538 : // Try to find first header that our peer doesn't have, and
5539 : // then send all headers past that one. If we come across any
5540 : // headers that aren't on m_chainman.ActiveChain(), give up.
5541 0 : for (const uint256& hash : peer->m_blocks_for_headers_relay) {
5542 0 : const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
5543 0 : assert(pindex);
5544 0 : if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5545 : // Bail out if we reorged away from this block
5546 0 : fRevertToInv = true;
5547 0 : break;
5548 : }
5549 0 : if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
5550 :                         // This means that the list of blocks to announce doesn't
5551 :                         // connect to each other.
5552 : // This shouldn't really be possible to hit during
5553 : // regular operation (because reorgs should take us to
5554 : // a chain that has some block not on the prior chain,
5555 : // which should be caught by the prior check), but one
5556 : // way this could happen is by using invalidateblock /
5557 : // reconsiderblock repeatedly on the tip, causing it to
5558 : // be added multiple times to m_blocks_for_headers_relay.
5559 : // Robustly deal with this rare situation by reverting
5560 : // to an inv.
5561 0 : fRevertToInv = true;
5562 0 : break;
5563 : }
5564 0 : pBestIndex = pindex;
5565 0 : if (fFoundStartingHeader) {
5566 : // add this to the headers message
5567 0 : vHeaders.push_back(pindex->GetBlockHeader());
5568 0 : } else if (PeerHasHeader(&state, pindex)) {
5569 0 : continue; // keep looking for the first new block
5570 0 : } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
5571 : // Peer doesn't have this header but they do have the prior one.
5572 : // Start sending headers.
5573 0 : fFoundStartingHeader = true;
5574 0 : vHeaders.push_back(pindex->GetBlockHeader());
5575 0 : } else {
5576 : // Peer doesn't have this header or the prior one -- nothing will
5577 : // connect, so bail out.
5578 0 : fRevertToInv = true;
5579 0 : break;
5580 : }
5581 : }
5582 0 : }
5583 0 : if (!fRevertToInv && !vHeaders.empty()) {
5584 0 : if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
5585 :                     // We only send up to 1 block as header-and-ids; more than that
5586 :                     // probably means we're doing an initial-ish sync or they're slow
5587 0 : LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
5588 : vHeaders.front().GetHash().ToString(), pto->GetId());
5589 :
5590 0 : std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
5591 : {
5592 0 : LOCK(m_most_recent_block_mutex);
5593 0 : if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) {
5594 0 : cached_cmpctblock_msg = msgMaker.Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block);
5595 0 : }
5596 0 : }
5597 0 : if (cached_cmpctblock_msg.has_value()) {
5598 0 : m_connman.PushMessage(pto, std::move(cached_cmpctblock_msg.value()));
5599 0 : } else {
5600 0 : CBlock block;
5601 0 : const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pBestIndex)};
5602 0 : assert(ret);
5603 0 : CBlockHeaderAndShortTxIDs cmpctblock{block};
5604 0 : m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
5605 0 : }
5606 0 : state.pindexBestHeaderSent = pBestIndex;
5607 0 : } else if (peer->m_prefers_headers) {
5608 0 : if (vHeaders.size() > 1) {
5609 0 : LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
5610 : vHeaders.size(),
5611 : vHeaders.front().GetHash().ToString(),
5612 : vHeaders.back().GetHash().ToString(), pto->GetId());
5613 0 : } else {
5614 0 : LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
5615 : vHeaders.front().GetHash().ToString(), pto->GetId());
5616 : }
5617 0 : m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
5618 0 : state.pindexBestHeaderSent = pBestIndex;
5619 0 : } else
5620 0 : fRevertToInv = true;
5621 0 : }
5622 0 : if (fRevertToInv) {
5623 : // If falling back to using an inv, just try to inv the tip.
5624 : // The last entry in m_blocks_for_headers_relay was our tip at some point
5625 : // in the past.
5626 0 : if (!peer->m_blocks_for_headers_relay.empty()) {
5627 0 : const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back();
5628 0 : const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
5629 0 : assert(pindex);
5630 :
5631 : // Warn if we're announcing a block that is not on the main chain.
5632 : // This should be very rare and could be optimized out.
5633 : // Just log for now.
5634 0 : if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
5635 0 : LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
5636 : hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString());
5637 0 : }
5638 :
5639 : // If the peer's chain has this block, don't inv it back.
5640 0 : if (!PeerHasHeader(&state, pindex)) {
5641 0 : peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
5642 0 : LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
5643 : pto->GetId(), hashToAnnounce.ToString());
5644 0 : }
5645 0 : }
5646 0 : }
5647 0 : peer->m_blocks_for_headers_relay.clear();
5648 0 : }
5649 :
5650 : //
5651 : // Message: inventory
5652 : //
5653 0 : std::vector<CInv> vInv;
5654 : {
5655 0 : LOCK(peer->m_block_inv_mutex);
5656 0 : vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET));
5657 :
5658 : // Add blocks
5659 0 : for (const uint256& hash : peer->m_blocks_for_inv_relay) {
5660 0 : vInv.push_back(CInv(MSG_BLOCK, hash));
5661 0 : if (vInv.size() == MAX_INV_SZ) {
5662 0 : m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5663 0 : vInv.clear();
5664 0 : }
5665 : }
5666 0 : peer->m_blocks_for_inv_relay.clear();
5667 0 : }
5668 :
5669 0 : if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
5670 0 : LOCK(tx_relay->m_tx_inventory_mutex);
5671 : // Check whether periodic sends should happen
5672 0 : bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
5673 0 : if (tx_relay->m_next_inv_send_time < current_time) {
5674 0 : fSendTrickle = true;
5675 0 : if (pto->IsInboundConn()) {
5676 0 : tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
5677 0 : } else {
5678 0 : tx_relay->m_next_inv_send_time = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
5679 : }
5680 0 : }
5681 :
5682 : // Time to send but the peer has requested we not relay transactions.
5683 0 : if (fSendTrickle) {
5684 0 : LOCK(tx_relay->m_bloom_filter_mutex);
5685 0 : if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear();
5686 0 : }
5687 :
5688 : // Respond to BIP35 mempool requests
5689 0 : if (fSendTrickle && tx_relay->m_send_mempool) {
5690 0 : auto vtxinfo = m_mempool.infoAll();
5691 0 : tx_relay->m_send_mempool = false;
5692 0 : const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5693 :
5694 0 : LOCK(tx_relay->m_bloom_filter_mutex);
5695 :
5696 0 : for (const auto& txinfo : vtxinfo) {
5697 0 : const uint256& hash = peer->m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash();
5698 0 : CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
5699 0 : tx_relay->m_tx_inventory_to_send.erase(hash);
5700 : // Don't send transactions that peers will not put into their mempool
5701 0 : if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5702 0 : continue;
5703 : }
5704 0 : if (tx_relay->m_bloom_filter) {
5705 0 : if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5706 0 : }
5707 0 : tx_relay->m_tx_inventory_known_filter.insert(hash);
5708 0 : vInv.push_back(inv);
5709 0 : if (vInv.size() == MAX_INV_SZ) {
5710 0 : m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5711 0 : vInv.clear();
5712 0 : }
5713 : }
5714 0 : }
5715 :
5716 : // Determine transactions to relay
5717 0 : if (fSendTrickle) {
5718 : // Produce a vector with all candidates for sending
5719 0 : std::vector<std::set<uint256>::iterator> vInvTx;
5720 0 : vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
5721 0 : for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) {
5722 0 : vInvTx.push_back(it);
5723 0 : }
5724 0 : const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()};
5725 : // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
5726 : // A heap is used so that not all items need sorting if only a few are being sent.
5727 0 : CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay);
5728 0 : std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5729 : // No reason to drain out at many times the network's capacity,
5730 : // especially since we have many peers and some will draw much shorter delays.
5731 0 : unsigned int nRelayedTransactions = 0;
5732 0 : LOCK(tx_relay->m_bloom_filter_mutex);
5733 0 : size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5};
5734 0 : broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max);
5735 0 : while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) {
5736 : // Fetch the top element from the heap
5737 0 : std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
5738 0 : std::set<uint256>::iterator it = vInvTx.back();
5739 0 : vInvTx.pop_back();
5740 0 : uint256 hash = *it;
5741 0 : CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash);
5742 : // Remove it from the to-be-sent set
5743 0 : tx_relay->m_tx_inventory_to_send.erase(it);
5744 : // Check if not in the filter already
5745 0 : if (tx_relay->m_tx_inventory_known_filter.contains(hash)) {
5746 0 : continue;
5747 : }
5748 :                         // Not in the mempool anymore? Don't bother sending it.
5749 0 : auto txinfo = m_mempool.info(ToGenTxid(inv));
5750 0 : if (!txinfo.tx) {
5751 0 : continue;
5752 : }
5753 :                         // Peer told us not to send transactions at that feerate? Don't bother sending it.
5754 0 : if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5755 0 : continue;
5756 : }
5757 0 : if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
5758 : // Send
5759 0 : vInv.push_back(inv);
5760 0 : nRelayedTransactions++;
5761 0 : if (vInv.size() == MAX_INV_SZ) {
5762 0 : m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5763 0 : vInv.clear();
5764 0 : }
5765 0 : tx_relay->m_tx_inventory_known_filter.insert(hash);
5766 0 : }
5767 :
5768 : // Ensure we'll respond to GETDATA requests for anything we've just announced
5769 0 : LOCK(m_mempool.cs);
5770 0 : tx_relay->m_last_inv_sequence = m_mempool.GetSequence();
5771 0 : }
5772 0 : }
5773 0 : if (!vInv.empty())
5774 0 : m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5775 :
5776 : // Detect whether we're stalling
5777 0 : auto stalling_timeout = m_block_stalling_timeout.load();
5778 0 : if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
5779 : // Stalling only triggers when the block download window cannot move. During normal steady state,
5780 : // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
5781 : // should only happen during initial block download.
5782 0 : LogPrintf("Peer=%d%s is stalling block download, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5783 0 : pto->fDisconnect = true;
5784 : // Increase timeout for the next peer so that we don't disconnect multiple peers if our own
5785 : // bandwidth is insufficient.
5786 0 : const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
5787 0 : if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
5788 0 : LogPrint(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout));
5789 0 : }
5790 0 : return true;
5791 : }
5792 : // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
5793 : // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
5794 : // We compensate for other peers to prevent killing off peers due to our own downstream link
5795 : // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
5796 : // to unreasonably increase our timeout.
5797 0 : if (state.vBlocksInFlight.size() > 0) {
5798 0 : QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
5799 0 : int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
5800 0 : if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
5801 0 : LogPrintf("Timeout downloading block %s from peer=%d%s, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5802 0 : pto->fDisconnect = true;
5803 0 : return true;
5804 : }
5805 0 : }
5806 : // Check for headers sync timeouts
5807 0 : if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
5808 : // Detect whether this is a stalling initial-headers-sync peer
5809 0 : if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
5810 0 : if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
5811 : // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
5812 : // and we have others we could be using instead.
5813 : // Note: If all our peers are inbound, then we won't
5814 : // disconnect our sync peer for stalling; we have bigger
5815 : // problems if we can't get any outbound peers.
5816 0 : if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
5817 0 : LogPrintf("Timeout downloading headers from peer=%d%s, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5818 0 : pto->fDisconnect = true;
5819 0 : return true;
5820 : } else {
5821 0 : LogPrintf("Timeout downloading headers from noban peer=%d%s, not disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
5822 : // Reset the headers sync state so that we have a
5823 : // chance to try downloading from a different peer.
5824 : // Note: this will also result in at least one more
5825 : // getheaders message to be sent to
5826 : // this peer (eventually).
5827 0 : state.fSyncStarted = false;
5828 0 : nSyncStarted--;
5829 0 : peer->m_headers_sync_timeout = 0us;
5830 : }
5831 0 : }
5832 0 : } else {
5833 : // After we've caught up once, reset the timeout so we can't trigger
5834 : // disconnect later.
5835 0 : peer->m_headers_sync_timeout = std::chrono::microseconds::max();
5836 : }
5837 0 : }
5838 :
5839 : // Check that outbound peers have reasonable chains
5840 : // GetTime() is used by this anti-DoS logic so we can test this using mocktime
5841 0 : ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
5842 :
5843 : //
5844 : // Message: getdata (blocks)
5845 : //
5846 0 : std::vector<CInv> vGetData;
5847 0 : if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
5848 0 : std::vector<const CBlockIndex*> vToDownload;
5849 0 : NodeId staller = -1;
5850 0 : FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.vBlocksInFlight.size(), vToDownload, staller);
5851 0 : for (const CBlockIndex *pindex : vToDownload) {
5852 0 : uint32_t nFetchFlags = GetFetchFlags(*peer);
5853 0 : vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
5854 0 : BlockRequested(pto->GetId(), *pindex);
5855 0 : LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
5856 : pindex->nHeight, pto->GetId());
5857 : }
5858 0 : if (state.vBlocksInFlight.empty() && staller != -1) {
5859 0 : if (State(staller)->m_stalling_since == 0us) {
5860 0 : State(staller)->m_stalling_since = current_time;
5861 0 : LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
5862 0 : }
5863 0 : }
5864 0 : }
5865 :
5866 : //
5867 : // Message: getdata (transactions)
5868 : //
5869 0 : std::vector<std::pair<NodeId, GenTxid>> expired;
5870 0 : auto requestable = m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
5871 0 : for (const auto& entry : expired) {
5872 0 : LogPrint(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx",
5873 : entry.second.GetHash().ToString(), entry.first);
5874 : }
5875 0 : for (const GenTxid& gtxid : requestable) {
5876 0 : if (!AlreadyHaveTx(gtxid)) {
5877 0 : LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
5878 : gtxid.GetHash().ToString(), pto->GetId());
5879 0 : vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash());
5880 0 : if (vGetData.size() >= MAX_GETDATA_SZ) {
5881 0 : m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
5882 0 : vGetData.clear();
5883 0 : }
5884 0 : m_txrequest.RequestedTx(pto->GetId(), gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL);
5885 0 : } else {
5886 : // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as
5887 : // this should already be called whenever a transaction becomes AlreadyHaveTx().
5888 0 : m_txrequest.ForgetTxHash(gtxid.GetHash());
5889 : }
5890 : }
5891 :
5892 :
5893 0 : if (!vGetData.empty())
5894 0 : m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
5895 0 : } // release cs_main
5896 0 : MaybeSendFeefilter(*pto, *peer, current_time);
5897 0 : return true;
5898 0 : }
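// A minimal sketch (not part of this file) of the headers-sync timeout set in
// SendMessages() above: a fixed base plus a per-header allowance scaled by the
// number of headers we still expect, estimated from how far the best known
// header lags the current time. Parameter names are illustrative.
#include <chrono>
#include <cstdint>

inline std::chrono::microseconds HeadersSyncTimeoutSketch(std::chrono::seconds time_behind,
                                                          std::chrono::seconds pow_target_spacing,
                                                          std::chrono::minutes base,
                                                          std::chrono::milliseconds per_header)
{
    const int64_t expected_headers = time_behind.count() / pow_target_spacing.count();
    return std::chrono::duration_cast<std::chrono::microseconds>(base) +
           std::chrono::duration_cast<std::chrono::microseconds>(per_header) * expected_headers;
}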