/bitcoin/src/node/txdownloadman_impl.cpp
Line | Count | Source |
1 | | // Copyright (c) 2024-present The Bitcoin Core developers |
2 | | // Distributed under the MIT software license, see the accompanying |
3 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
4 | | |
5 | | #include <node/txdownloadman_impl.h> |
6 | | #include <node/txdownloadman.h> |
7 | | |
8 | | #include <chain.h> |
9 | | #include <consensus/validation.h> |
10 | | #include <logging.h> |
11 | | #include <txmempool.h> |
12 | | #include <validation.h> |
13 | | #include <validationinterface.h> |
14 | | |
15 | | namespace node { |
16 | | // TxDownloadManager wrappers |
17 | | TxDownloadManager::TxDownloadManager(const TxDownloadOptions& options) : |
18 | 11.0k | m_impl{std::make_unique<TxDownloadManagerImpl>(options)} |
19 | 11.0k | {} |
20 | 11.0k | TxDownloadManager::~TxDownloadManager() = default; |
21 | | |
22 | | void TxDownloadManager::ActiveTipChange() |
23 | 2.22M | { |
24 | 2.22M | m_impl->ActiveTipChange(); |
25 | 2.22M | } |
26 | | void TxDownloadManager::BlockConnected(const std::shared_ptr<const CBlock>& pblock) |
27 | 2.23M | { |
28 | 2.23M | m_impl->BlockConnected(pblock); |
29 | 2.23M | } |
30 | | void TxDownloadManager::BlockDisconnected() |
31 | 3.89k | { |
32 | 3.89k | m_impl->BlockDisconnected(); |
33 | 3.89k | } |
34 | | void TxDownloadManager::ConnectedPeer(NodeId nodeid, const TxDownloadConnectionInfo& info) |
35 | 88.7k | { |
36 | 88.7k | m_impl->ConnectedPeer(nodeid, info); |
37 | 88.7k | } |
38 | | void TxDownloadManager::DisconnectedPeer(NodeId nodeid) |
39 | 88.7k | { |
40 | 88.7k | m_impl->DisconnectedPeer(nodeid); |
41 | 88.7k | } |
42 | | bool TxDownloadManager::AddTxAnnouncement(NodeId peer, const GenTxid& gtxid, std::chrono::microseconds now) |
43 | 465k | { |
44 | 465k | return m_impl->AddTxAnnouncement(peer, gtxid, now); |
45 | 465k | } |
46 | | std::vector<GenTxid> TxDownloadManager::GetRequestsToSend(NodeId nodeid, std::chrono::microseconds current_time) |
47 | 41.8M | { |
48 | 41.8M | return m_impl->GetRequestsToSend(nodeid, current_time); |
49 | 41.8M | } |
50 | | void TxDownloadManager::ReceivedNotFound(NodeId nodeid, const std::vector<uint256>& txhashes) |
51 | 79 | { |
52 | 79 | m_impl->ReceivedNotFound(nodeid, txhashes); |
53 | 79 | } |
54 | | void TxDownloadManager::MempoolAcceptedTx(const CTransactionRef& tx) |
55 | 38.1k | { |
56 | 38.1k | m_impl->MempoolAcceptedTx(tx); |
57 | 38.1k | } |
58 | | RejectedTxTodo TxDownloadManager::MempoolRejectedTx(const CTransactionRef& ptx, const TxValidationState& state, NodeId nodeid, bool first_time_failure) |
59 | 353k | { |
60 | 353k | return m_impl->MempoolRejectedTx(ptx, state, nodeid, first_time_failure); |
61 | 353k | } |
62 | | void TxDownloadManager::MempoolRejectedPackage(const Package& package) |
63 | 2.45k | { |
64 | 2.45k | m_impl->MempoolRejectedPackage(package); |
65 | 2.45k | } |
66 | | std::pair<bool, std::optional<PackageToValidate>> TxDownloadManager::ReceivedTx(NodeId nodeid, const CTransactionRef& ptx) |
67 | 389k | { |
68 | 389k | return m_impl->ReceivedTx(nodeid, ptx); |
69 | 389k | } |
70 | | bool TxDownloadManager::HaveMoreWork(NodeId nodeid) const |
71 | 5.50M | { |
72 | 5.50M | return m_impl->HaveMoreWork(nodeid); |
73 | 5.50M | } |
74 | | CTransactionRef TxDownloadManager::GetTxToReconsider(NodeId nodeid) |
75 | 42.4M | { |
76 | 42.4M | return m_impl->GetTxToReconsider(nodeid); |
77 | 42.4M | } |
78 | | void TxDownloadManager::CheckIsEmpty() const |
79 | 11.0k | { |
80 | 11.0k | m_impl->CheckIsEmpty(); |
81 | 11.0k | } |
82 | | void TxDownloadManager::CheckIsEmpty(NodeId nodeid) const |
83 | 88.7k | { |
84 | 88.7k | m_impl->CheckIsEmpty(nodeid); |
85 | 88.7k | } |
86 | | std::vector<TxOrphanage::OrphanTxBase> TxDownloadManager::GetOrphanTransactions() const |
87 | 0 | { |
88 | 0 | return m_impl->GetOrphanTransactions(); |
89 | 0 | } |
90 | | |
91 | | // TxDownloadManagerImpl |
92 | | void TxDownloadManagerImpl::ActiveTipChange() |
93 | 2.22M | { |
94 | 2.22M | RecentRejectsFilter().reset(); |
95 | 2.22M | RecentRejectsReconsiderableFilter().reset(); |
96 | 2.22M | } |
97 | | |
98 | | void TxDownloadManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock) |
99 | 2.23M | { |
100 | 2.23M | m_orphanage.EraseForBlock(*pblock); |
101 | | |
102 | 2.25M | for (const auto& ptx : pblock->vtx) { Branch (102:26): [True: 2.25M, False: 2.23M]
|
103 | 2.25M | RecentConfirmedTransactionsFilter().insert(ptx->GetHash().ToUint256()); |
104 | 2.25M | if (ptx->HasWitness()) { Branch (104:13): [True: 2.24M, False: 11.6k]
|
105 | 2.24M | RecentConfirmedTransactionsFilter().insert(ptx->GetWitnessHash().ToUint256()); |
106 | 2.24M | } |
107 | 2.25M | m_txrequest.ForgetTxHash(ptx->GetHash()); |
108 | 2.25M | m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); |
109 | 2.25M | } |
110 | 2.23M | } |
111 | | |
112 | | void TxDownloadManagerImpl::BlockDisconnected() |
113 | 3.89k | { |
114 | | // To avoid relay problems with transactions that were previously |
115 | | // confirmed, clear our filter of recently confirmed transactions whenever |
116 | | // there's a reorg. |
117 | | // This means that in a 1-block reorg (where 1 block is disconnected and |
118 | | // then another block reconnected), our filter will drop to having only one |
119 | | // block's worth of transactions in it, but that should be fine, since |
120 | | // presumably the most common case of relaying a confirmed transaction |
121 | | // should be just after a new block containing it is found. |
122 | 3.89k | RecentConfirmedTransactionsFilter().reset(); |
123 | 3.89k | } |
124 | | |
125 | | bool TxDownloadManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable) |
126 | 1.49M | { |
127 | 1.49M | const uint256& hash = gtxid.GetHash(); |
128 | | |
129 | 1.49M | if (gtxid.IsWtxid()) { Branch (129:9): [True: 964k, False: 528k]
|
130 | | // Normal query by wtxid. |
131 | 964k | if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; Branch (131:13): [True: 7.08k, False: 957k]
|
132 | 964k | } else { |
133 | | // Never query by txid: it is possible that the transaction in the orphanage has the same |
134 | | // txid but a different witness, which would give us a false positive result. If we decided |
135 | | // not to request the transaction based on this result, an attacker could prevent us from |
136 | | // downloading a transaction by intentionally creating a malleated version of it. While |
137 | | // only one (or none!) of these transactions can ultimately be confirmed, we have no way of |
138 | | // discerning which one that is, so the orphanage can store multiple transactions with the |
139 | | // same txid. |
140 | | // |
141 | | // While we won't query by txid, we can try to "guess" what the wtxid is based on the txid. |
142 | | // A non-segwit transaction's txid == wtxid. Query this txid "casted" to a wtxid. This will |
143 | | // help us find non-segwit transactions, saving bandwidth, and should have no false positives. |
144 | 528k | if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; Branch (144:13): [True: 20.6k, False: 507k]
|
145 | 528k | } |
146 | | |
147 | 1.46M | if (include_reconsiderable && RecentRejectsReconsiderableFilter().contains(hash)) return true; Branch (147:9): [True: 434k, False: 1.03M]
Branch (147:35): [True: 727, False: 433k]
|
148 | | |
149 | 1.46M | if (RecentConfirmedTransactionsFilter().contains(hash)) return true; Branch (149:9): [True: 7.43k, False: 1.45M]
|
150 | | |
151 | 1.45M | return RecentRejectsFilter().contains(hash) || m_opts.m_mempool.exists(gtxid); Branch (151:12): [True: 5.61k, False: 1.45M]
Branch (151:52): [True: 7.08k, False: 1.44M]
|
152 | 1.46M | } |
153 | | |
154 | | void TxDownloadManagerImpl::ConnectedPeer(NodeId nodeid, const TxDownloadConnectionInfo& info) |
155 | 88.7k | { |
156 | | // If already connected (shouldn't happen in practice), exit early. |
157 | 88.7k | if (m_peer_info.contains(nodeid)) return; Branch (157:9): [True: 0, False: 88.7k]
|
158 | | |
159 | 88.7k | m_peer_info.try_emplace(nodeid, info); |
160 | 88.7k | if (info.m_wtxid_relay) m_num_wtxid_peers += 1; Branch (160:9): [True: 88.7k, False: 0]
|
161 | 88.7k | } |
162 | | |
163 | | void TxDownloadManagerImpl::DisconnectedPeer(NodeId nodeid) |
164 | 88.7k | { |
165 | 88.7k | m_orphanage.EraseForPeer(nodeid); |
166 | 88.7k | m_txrequest.DisconnectedPeer(nodeid); |
167 | | |
168 | 88.7k | if (auto it = m_peer_info.find(nodeid); it != m_peer_info.end()) { Branch (168:45): [True: 88.7k, False: 0]
|
169 | 88.7k | if (it->second.m_connection_info.m_wtxid_relay) m_num_wtxid_peers -= 1; Branch (169:13): [True: 88.7k, False: 0]
|
170 | 88.7k | m_peer_info.erase(it); |
171 | 88.7k | } |
172 | | |
173 | 88.7k | } |
174 | | |
175 | | bool TxDownloadManagerImpl::AddTxAnnouncement(NodeId peer, const GenTxid& gtxid, std::chrono::microseconds now) |
176 | 465k | { |
177 | | // If this is an orphan we are trying to resolve, consider this peer as a orphan resolution candidate instead. |
178 | | // - is wtxid matching something in orphanage |
179 | | // - exists in orphanage |
180 | | // - peer can be an orphan resolution candidate |
181 | 465k | if (gtxid.IsWtxid()) { Branch (181:9): [True: 413k, False: 51.2k]
|
182 | 413k | const auto wtxid{Wtxid::FromUint256(gtxid.GetHash())}; |
183 | 413k | if (auto orphan_tx{m_orphanage.GetTx(wtxid)}) { Branch (183:18): [True: 28.2k, False: 385k]
|
184 | 28.2k | auto unique_parents{GetUniqueParents(*orphan_tx)}; |
185 | 30.9k | std::erase_if(unique_parents, [&](const auto& txid){ |
186 | 30.9k | return AlreadyHaveTx(GenTxid::Txid(txid), /*include_reconsiderable=*/false); |
187 | 30.9k | }); |
188 | | |
189 | | // The missing parents may have all been rejected or accepted since the orphan was added to the orphanage. |
190 | | // Do not delete from the orphanage, as it may be queued for processing. |
191 | 28.2k | if (unique_parents.empty()) { Branch (191:17): [True: 1.65k, False: 26.5k]
|
192 | 1.65k | return true; |
193 | 1.65k | } |
194 | | |
195 | 26.5k | if (MaybeAddOrphanResolutionCandidate(unique_parents, wtxid, peer, now)) { Branch (195:17): [True: 10.1k, False: 16.4k]
|
196 | 10.1k | m_orphanage.AddAnnouncer(orphan_tx->GetWitnessHash(), peer); |
197 | 10.1k | } |
198 | | |
199 | | // Return even if the peer isn't an orphan resolution candidate. This would be caught by AlreadyHaveTx. |
200 | 26.5k | return true; |
201 | 28.2k | } |
202 | 413k | } |
203 | | |
204 | | // If this is an inv received from a peer and we already have it, we can drop it. |
205 | 436k | if (AlreadyHaveTx(gtxid, /*include_reconsiderable=*/true)) return true; Branch (205:9): [True: 12.8k, False: 424k]
|
206 | | |
207 | 424k | auto it = m_peer_info.find(peer); |
208 | 424k | if (it == m_peer_info.end()) return false; Branch (208:9): [True: 0, False: 424k]
|
209 | 424k | const auto& info = it->second.m_connection_info; |
210 | 424k | if (!info.m_relay_permissions && m_txrequest.Count(peer) >= MAX_PEER_TX_ANNOUNCEMENTS) { Branch (210:9): [True: 424k, False: 0]
Branch (210:38): [True: 0, False: 424k]
|
211 | | // Too many queued announcements for this peer |
212 | 0 | return false; |
213 | 0 | } |
214 | | // Decide the TxRequestTracker parameters for this announcement: |
215 | | // - "preferred": if fPreferredDownload is set (= outbound, or NetPermissionFlags::NoBan permission) |
216 | | // - "reqtime": current time plus delays for: |
217 | | // - NONPREF_PEER_TX_DELAY for announcements from non-preferred connections |
218 | | // - TXID_RELAY_DELAY for txid announcements while wtxid peers are available |
219 | | // - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least |
220 | | // MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay). |
221 | 424k | auto delay{0us}; |
222 | 424k | if (!info.m_preferred) delay += NONPREF_PEER_TX_DELAY; Branch (222:9): [True: 209k, False: 214k]
|
223 | 424k | if (!gtxid.IsWtxid() && m_num_wtxid_peers > 0) delay += TXID_RELAY_DELAY; Branch (223:9): [True: 45.9k, False: 378k]
Branch (223:29): [True: 45.9k, False: 0]
|
224 | 424k | const bool overloaded = !info.m_relay_permissions && m_txrequest.CountInFlight(peer) >= MAX_PEER_TX_REQUEST_IN_FLIGHT; Branch (224:29): [True: 424k, False: 0]
Branch (224:58): [True: 1.30k, False: 422k]
|
225 | 424k | if (overloaded) delay += OVERLOADED_PEER_TX_DELAY; Branch (225:9): [True: 1.30k, False: 422k]
|
226 | | |
227 | 424k | m_txrequest.ReceivedInv(peer, gtxid, info.m_preferred, now + delay); |
228 | | |
229 | 424k | return false; |
230 | 424k | } |
231 | | |
232 | | bool TxDownloadManagerImpl::MaybeAddOrphanResolutionCandidate(const std::vector<Txid>& unique_parents, const Wtxid& wtxid, NodeId nodeid, std::chrono::microseconds now) |
233 | 329k | { |
234 | 329k | auto it_peer = m_peer_info.find(nodeid); |
235 | 329k | if (it_peer == m_peer_info.end()) return false; Branch (235:9): [True: 0, False: 329k]
|
236 | 329k | if (m_orphanage.HaveTxFromPeer(wtxid, nodeid)) return false; Branch (236:9): [True: 16.8k, False: 313k]
|
237 | | |
238 | 313k | const auto& peer_entry = m_peer_info.at(nodeid); |
239 | 313k | const auto& info = peer_entry.m_connection_info; |
240 | | |
241 | | // TODO: add delays and limits based on the amount of orphan resolution we are already doing |
242 | | // with this peer, how much they are using the orphanage, etc. |
243 | 313k | if (!info.m_relay_permissions) { Branch (243:9): [True: 313k, False: 0]
|
244 | | // This mirrors the delaying and dropping behavior in AddTxAnnouncement in order to preserve |
245 | | // existing behavior: drop if we are tracking too many invs for this peer already. Each |
246 | | // orphan resolution involves at least 1 transaction request which may or may not be |
247 | | // currently tracked in m_txrequest, so we include that in the count. |
248 | 313k | if (m_txrequest.Count(nodeid) + unique_parents.size() > MAX_PEER_TX_ANNOUNCEMENTS) return false; Branch (248:13): [True: 0, False: 313k]
|
249 | 313k | } |
250 | | |
251 | 313k | std::chrono::seconds delay{0s}; |
252 | 313k | if (!info.m_preferred) delay += NONPREF_PEER_TX_DELAY; Branch (252:9): [True: 156k, False: 156k]
|
253 | | // The orphan wtxid is used, but resolution entails requesting the parents by txid. Sometimes |
254 | | // parent and child are announced and thus requested around the same time, and we happen to |
255 | | // receive child sooner. Waiting a few seconds may allow us to cancel the orphan resolution |
256 | | // request if the parent arrives in that time. |
257 | 313k | if (m_num_wtxid_peers > 0) delay += TXID_RELAY_DELAY; Branch (257:9): [True: 313k, False: 0]
|
258 | 313k | const bool overloaded = !info.m_relay_permissions && m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT; Branch (258:29): [True: 313k, False: 0]
Branch (258:58): [True: 1.12k, False: 311k]
|
259 | 313k | if (overloaded) delay += OVERLOADED_PEER_TX_DELAY; Branch (259:9): [True: 1.12k, False: 311k]
|
260 | | |
261 | | // Treat finding orphan resolution candidate as equivalent to the peer announcing all missing parents. |
262 | | // In the future, orphan resolution may include more explicit steps |
263 | 321k | for (const auto& parent_txid : unique_parents) { Branch (263:34): [True: 321k, False: 313k]
|
264 | 321k | m_txrequest.ReceivedInv(nodeid, GenTxid::Txid(parent_txid), info.m_preferred, now + delay); |
265 | 321k | } |
266 | 313k | LogDebug(BCLog::TXPACKAGES, "added peer=%d as a candidate for resolving orphan %s\n", nodeid, wtxid.ToString()); |
267 | 313k | return true; |
268 | 313k | } |
269 | | |
270 | | std::vector<GenTxid> TxDownloadManagerImpl::GetRequestsToSend(NodeId nodeid, std::chrono::microseconds current_time) |
271 | 41.8M | { |
272 | 41.8M | std::vector<GenTxid> requests; |
273 | 41.8M | std::vector<std::pair<NodeId, GenTxid>> expired; |
274 | 41.8M | auto requestable = m_txrequest.GetRequestable(nodeid, current_time, &expired); |
275 | 41.8M | for (const auto& entry : expired) { Branch (275:28): [True: 46.6k, False: 41.8M]
|
276 | 46.6k | LogDebug(BCLog::NET, "timeout of inflight %s %s from peer=%d\n", entry.second.IsWtxid() ? "wtx" : "tx", |
277 | 46.6k | entry.second.GetHash().ToString(), entry.first); |
278 | 46.6k | } |
279 | 41.8M | for (const GenTxid& gtxid : requestable) { Branch (279:31): [True: 310k, False: 41.8M]
|
280 | 310k | if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) { Branch (280:13): [True: 310k, False: 7]
|
281 | 310k | LogDebug(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx", |
282 | 310k | gtxid.GetHash().ToString(), nodeid); |
283 | 310k | requests.emplace_back(gtxid); |
284 | 310k | m_txrequest.RequestedTx(nodeid, gtxid.GetHash(), current_time + GETDATA_TX_INTERVAL); |
285 | 310k | } else { |
286 | | // We have already seen this transaction, no need to download. This is just a belt-and-suspenders, as |
287 | | // this should already be called whenever a transaction becomes AlreadyHaveTx(). |
288 | 7 | m_txrequest.ForgetTxHash(gtxid.GetHash()); |
289 | 7 | } |
290 | 310k | } |
291 | 41.8M | return requests; |
292 | 41.8M | } |
293 | | |
294 | | void TxDownloadManagerImpl::ReceivedNotFound(NodeId nodeid, const std::vector<uint256>& txhashes) |
295 | 79 | { |
296 | 79 | for (const auto& txhash : txhashes) { Branch (296:29): [True: 32, False: 79]
|
297 | | // If we receive a NOTFOUND message for a tx we requested, mark the announcement for it as |
298 | | // completed in TxRequestTracker. |
299 | 32 | m_txrequest.ReceivedResponse(nodeid, txhash); |
300 | 32 | } |
301 | 79 | } |
302 | | |
303 | | std::optional<PackageToValidate> TxDownloadManagerImpl::Find1P1CPackage(const CTransactionRef& ptx, NodeId nodeid) |
304 | 4.76k | { |
305 | 4.76k | const auto& parent_wtxid{ptx->GetWitnessHash()}; |
306 | | |
307 | 4.76k | Assume(RecentRejectsReconsiderableFilter().contains(parent_wtxid.ToUint256())); |
308 | | |
309 | | // Only consider children from this peer. This helps prevent censorship attempts in which an attacker |
310 | | // sends lots of fake children for the parent, and we (unluckily) keep selecting the fake |
311 | | // children instead of the real one provided by the honest peer. Since we track all announcers |
312 | | // of an orphan, this does not exclude parent + orphan pairs that we happened to request from |
313 | | // different peers. |
314 | 4.76k | const auto cpfp_candidates_same_peer{m_orphanage.GetChildrenFromSamePeer(ptx, nodeid)}; |
315 | | |
316 | | // These children should be sorted from newest to oldest. In the (probably uncommon) case |
317 | | // of children that replace each other, this helps us accept the highest feerate (probably the |
318 | | // most recent) one efficiently. |
319 | 4.76k | for (const auto& child : cpfp_candidates_same_peer) { Branch (319:28): [True: 2.80k, False: 2.23k]
|
320 | 2.80k | Package maybe_cpfp_package{ptx, child}; |
321 | 2.80k | if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package)) && Branch (321:13): [True: 2.54k, False: 262]
Branch (321:13): [True: 2.53k, False: 270]
|
322 | 2.80k | !RecentRejectsFilter().contains(child->GetHash().ToUint256())) { Branch (322:13): [True: 2.53k, False: 8]
|
323 | 2.53k | return PackageToValidate{ptx, child, nodeid, nodeid}; |
324 | 2.53k | } |
325 | 2.80k | } |
326 | 2.23k | return std::nullopt; |
327 | 4.76k | } |
328 | | |
329 | | void TxDownloadManagerImpl::MempoolAcceptedTx(const CTransactionRef& tx) |
330 | 38.1k | { |
331 | | // As this version of the transaction was acceptable, we can forget about any requests for it. |
332 | | // No-op if the tx is not in txrequest. |
333 | 38.1k | m_txrequest.ForgetTxHash(tx->GetHash()); |
334 | 38.1k | m_txrequest.ForgetTxHash(tx->GetWitnessHash()); |
335 | | |
336 | 38.1k | m_orphanage.AddChildrenToWorkSet(*tx, m_opts.m_rng); |
337 | | // If it came from the orphanage, remove it. No-op if the tx is not in txorphanage. |
338 | 38.1k | m_orphanage.EraseTx(tx->GetWitnessHash()); |
339 | 38.1k | } |
340 | | |
341 | | std::vector<Txid> TxDownloadManagerImpl::GetUniqueParents(const CTransaction& tx) |
342 | 351k | { |
343 | 351k | std::vector<Txid> unique_parents; |
344 | 351k | unique_parents.reserve(tx.vin.size()); |
345 | 416k | for (const CTxIn& txin : tx.vin) { Branch (345:28): [True: 416k, False: 351k]
|
346 | | // We start with all parents, and then remove duplicates below. |
347 | 416k | unique_parents.push_back(txin.prevout.hash); |
348 | 416k | } |
349 | | |
350 | 351k | std::sort(unique_parents.begin(), unique_parents.end()); |
351 | 351k | unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); |
352 | | |
353 | 351k | return unique_parents; |
354 | 351k | } |
355 | | |
356 | | node::RejectedTxTodo TxDownloadManagerImpl::MempoolRejectedTx(const CTransactionRef& ptx, const TxValidationState& state, NodeId nodeid, bool first_time_failure) |
357 | 353k | { |
358 | 353k | const CTransaction& tx{*ptx}; |
359 | | // Results returned to caller |
360 | | // Whether we should call AddToCompactExtraTransactions at the end |
361 | 353k | bool add_extra_compact_tx{first_time_failure}; |
362 | | // Hashes to pass to AddKnownTx later |
363 | 353k | std::vector<Txid> unique_parents; |
364 | | // Populated if failure is reconsiderable and eligible package is found. |
365 | 353k | std::optional<node::PackageToValidate> package_to_validate; |
366 | | |
367 | 353k | if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { Branch (367:9): [True: 324k, False: 29.0k]
|
368 | | // Only process a new orphan if this is a first time failure, as otherwise it must be either |
369 | | // already in orphanage or from 1p1c processing. |
370 | 324k | if (first_time_failure && !RecentRejectsFilter().contains(ptx->GetWitnessHash().ToUint256())) { Branch (370:13): [True: 322k, False: 1.20k]
Branch (370:35): [True: 322k, False: 0]
|
371 | 322k | bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected |
372 | | |
373 | | // Deduplicate parent txids, so that we don't have to loop over |
374 | | // the same parent txid more than once down below. |
375 | 322k | unique_parents = GetUniqueParents(tx); |
376 | | |
377 | | // Distinguish between parents in m_lazy_recent_rejects and m_lazy_recent_rejects_reconsiderable. |
378 | | // We can tolerate having up to 1 parent in m_lazy_recent_rejects_reconsiderable since we |
379 | | // submit 1p1c packages. However, fail immediately if any are in m_lazy_recent_rejects. |
380 | 322k | std::optional<uint256> rejected_parent_reconsiderable; |
381 | 357k | for (const uint256& parent_txid : unique_parents) { Branch (381:45): [True: 357k, False: 295k]
|
382 | 357k | if (RecentRejectsFilter().contains(parent_txid)) { Branch (382:21): [True: 27.6k, False: 329k]
|
383 | 27.6k | fRejectedParents = true; |
384 | 27.6k | break; |
385 | 329k | } else if (RecentRejectsReconsiderableFilter().contains(parent_txid) && Branch (385:28): [True: 560, False: 328k]
Branch (385:28): [True: 549, False: 328k]
|
386 | 329k | !m_opts.m_mempool.exists(GenTxid::Txid(parent_txid))) { Branch (386:28): [True: 549, False: 11]
|
387 | | // More than 1 parent in m_lazy_recent_rejects_reconsiderable: 1p1c will not be |
388 | | // sufficient to accept this package, so just give up here. |
389 | 549 | if (rejected_parent_reconsiderable.has_value()) { Branch (389:25): [True: 14, False: 535]
|
390 | 14 | fRejectedParents = true; |
391 | 14 | break; |
392 | 14 | } |
393 | 535 | rejected_parent_reconsiderable = parent_txid; |
394 | 535 | } |
395 | 357k | } |
396 | 322k | if (!fRejectedParents) { Branch (396:17): [True: 295k, False: 27.6k]
|
397 | | // Filter parents that we already have. |
398 | | // Exclude m_lazy_recent_rejects_reconsiderable: the missing parent may have been |
399 | | // previously rejected for being too low feerate. This orphan might CPFP it. |
400 | 325k | std::erase_if(unique_parents, [&](const auto& txid){ |
401 | 325k | return AlreadyHaveTx(GenTxid::Txid(txid), /*include_reconsiderable=*/false); |
402 | 325k | }); |
403 | 295k | const auto now{GetTime<std::chrono::microseconds>()}; |
404 | 295k | const auto& wtxid = ptx->GetWitnessHash(); |
405 | | // Potentially flip add_extra_compact_tx to false if tx is already in orphanage, which |
406 | | // means it was already added to vExtraTxnForCompact. |
407 | 295k | add_extra_compact_tx &= !m_orphanage.HaveTx(wtxid); |
408 | | |
409 | | // If there is no candidate for orphan resolution, AddTx will not be called. This means |
410 | | // that if a peer is overloading us with invs and orphans, they will eventually not be |
411 | | // able to add any more transactions to the orphanage. |
412 | | // |
413 | | // Search by txid and, if the tx has a witness, wtxid |
414 | 295k | std::vector<NodeId> orphan_resolution_candidates{nodeid}; |
415 | 295k | m_txrequest.GetCandidatePeers(ptx->GetHash().ToUint256(), orphan_resolution_candidates); |
416 | 295k | if (ptx->HasWitness()) m_txrequest.GetCandidatePeers(ptx->GetWitnessHash().ToUint256(), orphan_resolution_candidates); Branch (416:21): [True: 256k, False: 39.1k]
|
417 | | |
418 | 303k | for (const auto& nodeid : orphan_resolution_candidates) { Branch (418:41): [True: 303k, False: 295k]
|
419 | 303k | if (MaybeAddOrphanResolutionCandidate(unique_parents, ptx->GetWitnessHash(), nodeid, now)) { Branch (419:25): [True: 302k, False: 350]
|
420 | 302k | m_orphanage.AddTx(ptx, nodeid); |
421 | 302k | } |
422 | 303k | } |
423 | | |
424 | | // Once added to the orphan pool, a tx is considered AlreadyHave, and we shouldn't request it anymore. |
425 | 295k | m_txrequest.ForgetTxHash(tx.GetHash()); |
426 | 295k | m_txrequest.ForgetTxHash(tx.GetWitnessHash()); |
427 | | |
428 | | // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789) |
429 | | // Note that, if the orphanage reaches capacity, it's possible that we immediately evict |
430 | | // the transaction we just added. |
431 | 295k | m_orphanage.LimitOrphans(m_opts.m_max_orphan_txs, m_opts.m_rng); |
432 | 295k | } else { |
433 | 27.6k | unique_parents.clear(); |
434 | 27.6k | LogDebug(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s (wtxid=%s)\n", |
435 | 27.6k | tx.GetHash().ToString(), |
436 | 27.6k | tx.GetWitnessHash().ToString()); |
437 | | // We will continue to reject this tx since it has rejected |
438 | | // parents so avoid re-requesting it from other peers. |
439 | | // Here we add both the txid and the wtxid, as we know that |
440 | | // regardless of what witness is provided, we will not accept |
441 | | // this, so we don't need to allow for redownload of this txid |
442 | | // from any of our non-wtxidrelay peers. |
443 | 27.6k | RecentRejectsFilter().insert(tx.GetHash().ToUint256()); |
444 | 27.6k | RecentRejectsFilter().insert(tx.GetWitnessHash().ToUint256()); |
445 | 27.6k | m_txrequest.ForgetTxHash(tx.GetHash()); |
446 | 27.6k | m_txrequest.ForgetTxHash(tx.GetWitnessHash()); |
447 | 27.6k | } |
448 | 322k | } |
449 | 324k | } else if (state.GetResult() == TxValidationResult::TX_WITNESS_STRIPPED) { Branch (449:16): [True: 982, False: 28.0k]
|
450 | 982 | add_extra_compact_tx = false; |
451 | 28.0k | } else { |
452 | | // We can add the wtxid of this transaction to our reject filter. |
453 | | // Do not add txids of witness transactions or witness-stripped |
454 | | // transactions to the filter, as they can have been malleated; |
455 | | // adding such txids to the reject filter would potentially |
456 | | // interfere with relay of valid transactions from peers that |
457 | | // do not support wtxid-based relay. See |
458 | | // https://github.com/bitcoin/bitcoin/issues/8279 for details. |
459 | | // We can remove this restriction (and always add wtxids to |
460 | | // the filter even for witness stripped transactions) once |
461 | | // wtxid-based relay is broadly deployed. |
462 | | // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 |
463 | | // for concerns around weakening security of unupgraded nodes |
464 | | // if we start doing this too early. |
465 | 28.0k | if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) { Branch (465:13): [True: 6.25k, False: 21.7k]
|
466 | | // If the result is TX_RECONSIDERABLE, add it to m_lazy_recent_rejects_reconsiderable |
467 | | // because we should not download or submit this transaction by itself again, but may |
468 | | // submit it as part of a package later. |
469 | 6.25k | RecentRejectsReconsiderableFilter().insert(ptx->GetWitnessHash().ToUint256()); |
470 | | |
471 | 6.25k | if (first_time_failure) { Branch (471:17): [True: 4.23k, False: 2.01k]
|
472 | | // When a transaction fails for TX_RECONSIDERABLE, look for a matching child in the |
473 | | // orphanage, as it is possible that they succeed as a package. |
474 | 4.23k | LogDebug(BCLog::TXPACKAGES, "tx %s (wtxid=%s) failed but reconsiderable, looking for child in orphanage\n", |
475 | 4.23k | ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()); |
476 | 4.23k | package_to_validate = Find1P1CPackage(ptx, nodeid); |
477 | 4.23k | } |
478 | 21.7k | } else { |
479 | 21.7k | RecentRejectsFilter().insert(ptx->GetWitnessHash().ToUint256()); |
480 | 21.7k | } |
481 | 28.0k | m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); |
482 | | // If the transaction failed for TX_INPUTS_NOT_STANDARD, |
483 | | // then we know that the witness was irrelevant to the policy |
484 | | // failure, since this check depends only on the txid |
485 | | // (the scriptPubKey being spent is covered by the txid). |
486 | | // Add the txid to the reject filter to prevent repeated |
487 | | // processing of this transaction in the event that child |
488 | | // transactions are later received (resulting in |
489 | | // parent-fetching by txid via the orphan-handling logic). |
490 | | // We only add the txid if it differs from the wtxid, to avoid wasting entries in the |
491 | | // rolling bloom filter. |
492 | 28.0k | if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) { Branch (492:13): [True: 48, False: 27.9k]
Branch (492:80): [True: 7, False: 41]
|
493 | 7 | RecentRejectsFilter().insert(ptx->GetHash().ToUint256()); |
494 | 7 | m_txrequest.ForgetTxHash(ptx->GetHash()); |
495 | 7 | } |
496 | 28.0k | } |
497 | | |
498 | | // If the tx failed in ProcessOrphanTx, it should be removed from the orphanage unless the |
499 | | // tx was still missing inputs. If the tx was not in the orphanage, EraseTx does nothing and returns 0. |
500 | 353k | if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS && m_orphanage.EraseTx(ptx->GetWitnessHash()) > 0) { Branch (500:9): [True: 29.0k, False: 324k]
Branch (500:71): [True: 6.40k, False: 22.5k]
|
501 | 6.40k | LogDebug(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s)\n", ptx->GetHash().ToString(), ptx->GetWitnessHash().ToString()); |
502 | 6.40k | } |
503 | | |
504 | 353k | return RejectedTxTodo{ |
505 | 353k | .m_should_add_extra_compact_tx = add_extra_compact_tx, |
506 | 353k | .m_unique_parents = std::move(unique_parents), |
507 | 353k | .m_package_to_validate = std::move(package_to_validate) |
508 | 353k | }; |
509 | 353k | } |
510 | | |
511 | | void TxDownloadManagerImpl::MempoolRejectedPackage(const Package& package) |
512 | 2.45k | { |
513 | 2.45k | RecentRejectsReconsiderableFilter().insert(GetPackageHash(package)); |
514 | 2.45k | } |
515 | | |
516 | | std::pair<bool, std::optional<PackageToValidate>> TxDownloadManagerImpl::ReceivedTx(NodeId nodeid, const CTransactionRef& ptx) |
517 | 389k | { |
518 | 389k | const uint256& txid = ptx->GetHash(); |
519 | 389k | const uint256& wtxid = ptx->GetWitnessHash(); |
520 | | |
521 | | // Mark that we have received a response |
522 | 389k | m_txrequest.ReceivedResponse(nodeid, txid); |
523 | 389k | if (ptx->HasWitness()) m_txrequest.ReceivedResponse(nodeid, wtxid); Branch (523:9): [True: 339k, False: 49.1k]
|
524 | | |
525 | | // First check if we should drop this tx. |
526 | | // We do the AlreadyHaveTx() check using wtxid, rather than txid - in the |
527 | | // absence of witness malleation, this is strictly better, because the |
528 | | // recent rejects filter may contain the wtxid but rarely contains |
529 | | // the txid of a segwit transaction that has been rejected. |
530 | | // In the presence of witness malleation, it's possible that by only |
531 | | // doing the check with wtxid, we could overlook a transaction which |
532 | | // was confirmed with a different witness, or exists in our mempool |
533 | | // with a different witness, but this has limited downside: |
534 | | // mempool validation does its own lookup of whether we have the txid |
535 | | // already; and an adversary can already relay us old transactions |
536 | | // (older than our recency filter) if trying to DoS us, without any need |
537 | | // for witness malleation. |
538 | 389k | if (AlreadyHaveTx(GenTxid::Wtxid(wtxid), /*include_reconsiderable=*/true)) { Branch (538:9): [True: 9.76k, False: 379k]
|
539 | | |
540 | | if (RecentRejectsReconsiderableFilter().contains(wtxid)) { |
541 | | // When a transaction is already in m_lazy_recent_rejects_reconsiderable, we shouldn't submit |
542 | | // it by itself again. However, look for a matching child in the orphanage, as it is |
543 | | // possible that they succeed as a package. |
544 | | LogDebug(BCLog::TXPACKAGES, "found tx %s (wtxid=%s) in reconsiderable rejects, looking for child in orphanage\n", |
545 | | txid.ToString(), wtxid.ToString()); |
546 | | return std::make_pair(false, Find1P1CPackage(ptx, nodeid)); |
547 | | } |
548 | | |
549 | | // If a tx is detected by m_lazy_recent_rejects it is ignored. Because we haven't |
550 | | // submitted the tx to our mempool, we won't have computed a DoS |
551 | | // score for it or determined exactly why we consider it invalid. |
552 | | // |
553 | | // This means we won't penalize any peer subsequently relaying a DoSy |
554 | 9.76k | // tx (even if we penalized the first peer who gave it to us) because |
555 | 379k | // we have to account for m_lazy_recent_rejects showing false positives. In Branch (555:16): [True: 532, False: 378k]
|
556 | | // other words, we shouldn't penalize a peer if we aren't *sure* they |
557 | | // submitted a DoSy tx. |
558 | | // |
559 | 532 | // Note that m_lazy_recent_rejects doesn't just record DoSy or invalid |
560 | 532 | // transactions, but any tx not accepted by the mempool, which may be |
561 | 532 | // due to node policy (vs. consensus). So we can't blanket penalize a |
562 | 532 | // peer simply for relaying a tx that our m_lazy_recent_rejects has caught, |
563 | | // regardless of false positives. |
564 | | return {false, std::nullopt}; |
565 | 378k | } |
566 | 389k | |
567 | | return {true, std::nullopt}; |
568 | | } |
569 | 5.50M | |
570 | 5.50M | bool TxDownloadManagerImpl::HaveMoreWork(NodeId nodeid) |
571 | 5.50M | { |
572 | | return m_orphanage.HaveTxToReconsider(nodeid); |
573 | | } |
574 | 42.4M | |
575 | 42.4M | CTransactionRef TxDownloadManagerImpl::GetTxToReconsider(NodeId nodeid) |
576 | 42.4M | { |
577 | | return m_orphanage.GetTxToReconsider(nodeid); |
578 | | } |
579 | 88.7k | |
// Sanity check run when a peer disconnects: no per-peer download state may
// remain anywhere in the tx download pipeline.
void TxDownloadManagerImpl::CheckIsEmpty(NodeId nodeid)
{
    // No outstanding tx requests attributed to this peer.
    assert(m_txrequest.Count(nodeid) == 0);
    // No orphanage memory usage accounted to this peer.
    assert(m_orphanage.UsageByPeer(nodeid) == 0);
}
// Global sanity check (e.g. at shutdown): every tracking structure in the
// download manager must be completely drained.
void TxDownloadManagerImpl::CheckIsEmpty()
{
    // Orphanage holds no transactions and accounts for no memory.
    assert(m_orphanage.TotalOrphanUsage() == 0);
    assert(m_orphanage.Size() == 0);
    // No pending or in-flight transaction requests.
    assert(m_txrequest.Size() == 0);
    // Peer bookkeeping: no wtxid-relay peers still registered.
    assert(m_num_wtxid_peers == 0);
}
// Snapshot accessor: returns the current orphanage contents (used for RPC
// and introspection); does not modify any state.
std::vector<TxOrphanage::OrphanTxBase> TxDownloadManagerImpl::GetOrphanTransactions() const
{
    return m_orphanage.GetOrphanTransactions();
}
596 | | } // namespace node |