// Copyright (c) 2016-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <blockencodings.h>
#include <chainparams.h>
#include <common/system.h>
#include <consensus/consensus.h>
#include <consensus/validation.h>
#include <crypto/sha256.h>
#include <crypto/siphash.h>
#include <logging.h>
#include <random.h>
#include <streams.h>
#include <txmempool.h>
#include <validation.h>

#include <unordered_map>

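// Rough usage sketch (the actual call sites live in net_processing): the
// sender builds a CBlockHeaderAndShortTxIDs from a full block and relays it;
// the receiver calls PartiallyDownloadedBlock::InitData() with the received
// compact block and any extra transactions it has kept around, checks
// IsTxAvailable() for each slot, requests whatever is still missing via
// getblocktxn, and finally calls FillBlock() with the returned transactions.
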
CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) :
        nonce(GetRand<uint64_t>()),
        shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
    FillShortTxIDSelector();
    //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
    prefilledtxn[0] = {0, block.vtx[0]};
    for (size_t i = 1; i < block.vtx.size(); i++) {
        const CTransaction& tx = *block.vtx[i];
        shorttxids[i - 1] = GetShortID(tx.GetWitnessHash());
    }
}

void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
    DataStream stream{};
    stream << header << nonce;
    CSHA256 hasher;
    hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
    uint256 shorttxidhash;
    hasher.Finalize(shorttxidhash.begin());
    shorttxidk0 = shorttxidhash.GetUint64(0);
    shorttxidk1 = shorttxidhash.GetUint64(1);
}

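// Short IDs (BIP 152): the two SipHash keys computed above are the first two
// 64-bit words of the (single) SHA256 of the serialized header and nonce;
// GetShortID() below keeps the low 48 bits of SipHash-2-4 over the given
// transaction hash (the wtxid, as passed in by the constructor).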
uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const uint256& txhash) const {
    static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
    return SipHashUint256(shorttxidk0, shorttxidk1, txhash) & 0xffffffffffffL;
}


ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<std::pair<uint256, CTransactionRef>>& extra_txn) {
    if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
        return READ_STATUS_INVALID;
    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
        return READ_STATUS_INVALID;

    if (!header.IsNull() || !txn_available.empty()) return READ_STATUS_INVALID;

    header = cmpctblock.header;
    txn_available.resize(cmpctblock.BlockTxCount());

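    // Prefilled indexes are delta-encoded relative to the previous prefilled
    // transaction: e.g. encoded indexes {0, 0, 5} decode to absolute block
    // positions {0, 1, 7}.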
    int32_t lastprefilledindex = -1;
    for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
        if (cmpctblock.prefilledtxn[i].tx->IsNull())
            return READ_STATUS_INVALID;

        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
            return READ_STATUS_INVALID;
        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
            // If we are inserting a tx at an index greater than our full list of shorttxids
            // plus the number of prefilled txn we've inserted, then we have txn for which we
            // have neither a prefilled txn nor a shorttxid!
            return READ_STATUS_INVALID;
        }
        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
    }
    prefilled_count = cmpctblock.prefilledtxn.size();

    // Calculate map of txids -> positions and check mempool to see what we have (or don't).
    // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
    // of short IDs, any highly-uneven distribution of elements can be safely treated as a
    // READ_STATUS_FAILED.
    std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
    uint16_t index_offset = 0;
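    // index_offset skips positions already occupied by prefilled transactions,
    // so each short ID gets mapped to its absolute position in the block.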
    for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
        while (txn_available[i + index_offset])
            index_offset++;
        shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
        // To determine the chance that the number of entries in a bucket exceeds N,
        // we use the fact that the number of elements in a single bucket is
        // binomially distributed (with n = the number of shorttxids S, and p =
        // 1 / the number of buckets), that in the worst case the number of buckets is
        // equal to S (due to std::unordered_map having a default load factor of 1.0),
        // and that the chance for any bucket to exceed N elements is at most
        // buckets * (the chance that any given bucket is above N elements).
        // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
        // If we assume blocks of up to 16000 transactions, allowing 12 elements per bucket should
        // only fail once per ~1 million block transfers (per peer and connection).
        if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
            return READ_STATUS_FAILED;
    }
    // TODO: in the shortid-collision case, we should instead request both transactions
    // which collided. Falling back to full-block-request here is overkill.
    if (shorttxids.size() != cmpctblock.shorttxids.size())
        return READ_STATUS_FAILED; // Short ID collision

    std::vector<bool> have_txn(txn_available.size());
    {
        LOCK(pool->cs);
        for (size_t i = 0; i < pool->vTxHashes.size(); i++) {
            uint64_t shortid = cmpctblock.GetShortID(pool->vTxHashes[i].first);
            std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
            if (idit != shorttxids.end()) {
                if (!have_txn[idit->second]) {
                    txn_available[idit->second] = pool->vTxHashes[i].second->GetSharedTx();
                    have_txn[idit->second] = true;
                    mempool_count++;
                } else {
                    // If we find two mempool txn that match the short id, just request it.
                    // This should be rare enough that the extra bandwidth doesn't matter,
                    // but eating a round-trip due to FillBlock failure would be annoying.
                    if (txn_available[idit->second]) {
                        txn_available[idit->second].reset();
                        mempool_count--;
                    }
                }
            }
            // Though ideally we'd continue scanning for the two-txn-match-shortid case,
            // the performance win of an early exit here is too good to pass up and worth
            // the extra risk.
            if (mempool_count == shorttxids.size())
                break;
        }
    }

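    // Second pass: try the caller-provided extra_txn pool. In Bitcoin Core this
    // typically holds transactions seen recently but not (or no longer) in the
    // mempool, kept around specifically to help compact block reconstruction.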
    for (size_t i = 0; i < extra_txn.size(); i++) {
        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i].first);
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
        if (idit != shorttxids.end()) {
            if (!have_txn[idit->second]) {
                txn_available[idit->second] = extra_txn[i].second;
                have_txn[idit->second] = true;
                mempool_count++;
                extra_count++;
            } else {
                // If we find two mempool/extra txn that match the short id, just
                // request it.
                // This should be rare enough that the extra bandwidth doesn't matter,
                // but eating a round-trip due to FillBlock failure would be annoying.
                // Note that we don't want duplication between extra_txn and mempool to
                // trigger this case, so we compare witness hashes first.
                if (txn_available[idit->second] &&
                        txn_available[idit->second]->GetWitnessHash() != extra_txn[i].second->GetWitnessHash()) {
                    txn_available[idit->second].reset();
                    mempool_count--;
                    extra_count--;
                }
            }
        }
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
        // the performance win of an early exit here is too good to pass up and worth
        // the extra risk.
        if (mempool_count == shorttxids.size())
            break;
    }

    LogPrint(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, PROTOCOL_VERSION));

    return READ_STATUS_OK;
}

bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
{
    if (header.IsNull()) return false;

    assert(index < txn_available.size());
    return txn_available[index] != nullptr;
}

ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing)
{
    if (header.IsNull()) return READ_STATUS_INVALID;

    uint256 hash = header.GetHash();
    block = header;
    block.vtx.resize(txn_available.size());

    size_t tx_missing_offset = 0;
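    // vtx_missing must supply the still-missing transactions in block order;
    // they are consumed sequentially, one per empty txn_available slot.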
    for (size_t i = 0; i < txn_available.size(); i++) {
        if (!txn_available[i]) {
            if (vtx_missing.size() <= tx_missing_offset)
                return READ_STATUS_INVALID;
            block.vtx[i] = vtx_missing[tx_missing_offset++];
        } else
            block.vtx[i] = std::move(txn_available[i]);
    }

    // Make sure we can't call FillBlock again.
    header.SetNull();
    txn_available.clear();

    if (vtx_missing.size() != tx_missing_offset)
        return READ_STATUS_INVALID;

    BlockValidationState state;
    CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock;
    if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) {
        // TODO: We really want to just check the merkle tree manually here,
        // but that is expensive, and CheckBlock caches a block's
        // "checked-status" (in the CBlock?). CBlock should be able to
        // check its own merkle root and cache that check.
        if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED)
            return READ_STATUS_FAILED; // Possible Short ID collision
        return READ_STATUS_CHECKBLOCK_FAILED;
    }

    LogPrint(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
    if (vtx_missing.size() < 5) {
        for (const auto& tx : vtx_missing) {
            LogPrint(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
        }
    }

    return READ_STATUS_OK;
}